/*
 * Write up to 'length' bytes from 'src' into the ring buffer 'r'.
 *
 * The free region of the ring may be split in two pieces: the tail of the
 * underlying buffer, [prod_indx, usable_buffer_space), and — after wrapping
 * around — the head, [0, cons_indx).  Both pieces are filled as needed.
 *
 * If the ring is completely full, sleep in 1 ms steps and retry; after 100
 * unsuccessful attempts (~100 ms) give up and return -1 with errno = EINTR.
 *
 * Returns the number of bytes actually written; a short write is possible
 * when less than 'length' bytes of free space are available.
 */
static int ring_write(struct ring *r, void *src, int length) {
    int full = 0;

    for (;;) {
        /* free space may be split over the end of the buffer */
        int first_chunk_size = (r->usable_buffer_space - r->prod_indx);
        /* The wrapped piece [0, cons_indx) is only writable when the consumer
         * is not ahead of the producer; when cons_indx > prod_indx the free
         * space is the single contiguous region [prod_indx, cons_indx) and no
         * wrap-around copy must happen.  (The previous condition was inverted,
         * which dropped the wrap-around copy and caused needless short
         * writes whenever the free space straddled the buffer end.) */
        int second_chunk_size = (r->cons_indx <= r->prod_indx) ? (r->cons_indx) : 0;
        int l, fl, sl;

        /* full? */
        if (RING_FREE_BYTES == 0) {
            /*XXX hardcoded*/
            if (full >= 100) {
                errno = EINTR;
                return -1;
            }
            /*XXX hardcoded */
            usleep(1000);
            /* should return in 100ms max; definitely not midstream */
            full++;
            continue;
        }

        /* calculate lengths in case of a split buffer */
        l = PA_MIN((int) RING_FREE_BYTES, length);
        fl = PA_MIN(l, first_chunk_size);
        sl = PA_MIN(l - fl, second_chunk_size);

        memcpy(r->buffer + r->prod_indx, src, fl);
        if (sl)
            memcpy(r->buffer, ((char*) src) + fl, sl);

        r->prod_indx = (r->prod_indx + fl + sl) % r->usable_buffer_space;

        return sl + fl;
    }
}
/* Called from I/O thread context */
/*
 * Render callback of the LADSPA sink input: pull audio rendered into our own
 * (virtual) sink, run it through the LADSPA plugin instance(s) and hand the
 * processed audio to the master sink via 'chunk'.  Returns 0 on success.
 */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;
    float *src, *dst;
    size_t fs;
    unsigned n, h, c;
    pa_memchunk tchunk;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    /* Hmm, process any rewind request that might be queued up */
    pa_sink_process_rewind(u->sink, 0);

    /* Keep rendering from our sink into the queue until a chunk can be peeked */
    while (pa_memblockq_peek(u->memblockq, &tchunk) < 0) {
        pa_memchunk nchunk;

        pa_sink_render(u->sink, nbytes, &nchunk);
        pa_memblockq_push(u->memblockq, &nchunk);
        pa_memblock_unref(nchunk.memblock);
    }

    tchunk.length = PA_MIN(nbytes, tchunk.length);
    pa_assert(tchunk.length > 0);

    fs = pa_frame_size(&i->sample_spec);
    /* Frames to process this round, additionally capped by the plugin block size */
    n = (unsigned) (PA_MIN(tchunk.length, u->block_size) / fs);

    pa_assert(n > 0);

    chunk->index = 0;
    chunk->length = n*fs;
    chunk->memblock = pa_memblock_new(i->sink->core->mempool, chunk->length);

    pa_memblockq_drop(u->memblockq, chunk->length);

    src = (float*) ((uint8_t*) pa_memblock_acquire(tchunk.memblock) + tchunk.index);
    dst = (float*) pa_memblock_acquire(chunk->memblock);

    /* One plugin handle per group of max_ladspaport_count channels: copy the
     * interleaved input into the plugin's per-port buffers (stride
     * u->channels floats -> contiguous), run the plugin, then copy the
     * per-port output back into the interleaved destination. */
    for (h = 0; h < (u->channels / u->max_ladspaport_count); h++) {
        for (c = 0; c < u->input_count; c++)
            pa_sample_clamp(PA_SAMPLE_FLOAT32NE, u->input[c], sizeof(float), src+ h*u->max_ladspaport_count + c, u->channels*sizeof(float), n);
        u->descriptor->run(u->handle[h], n);
        for (c = 0; c < u->output_count; c++)
            pa_sample_clamp(PA_SAMPLE_FLOAT32NE, dst + h*u->max_ladspaport_count + c, u->channels*sizeof(float), u->output[c], sizeof(float), n);
    }

    pa_memblock_release(tchunk.memblock);
    pa_memblock_release(chunk->memblock);

    pa_memblock_unref(tchunk.memblock);

    return 0;
}
/* Called from thread context */
/*
 * Pop callback for a native-protocol playback stream.  Peeks the next chunk
 * from the connection's input queue.  On underrun, flags it and — when the
 * connection is already dead and may safely go away — schedules its unlink
 * on the main thread.  On success, drops the consumed bytes and asks the
 * client for more data if the queue is now missing some.
 * Returns 0 on success, -1 on underrun.
 */
static int sink_input_pop_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
    connection *c;

    pa_sink_input_assert_ref(i);
    c = CONNECTION(i->userdata);
    connection_assert_ref(c);
    pa_assert(chunk);

    if (pa_memblockq_peek(c->input_memblockq, chunk) < 0) {

        c->playback.underrun = true;

        if (c->dead && pa_sink_input_safe_to_remove(i))
            pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(c), CONNECTION_MESSAGE_UNLINK_CONNECTION, NULL, 0, NULL, NULL);

        return -1;
    } else {
        size_t m;

        chunk->length = PA_MIN(length, chunk->length);

        c->playback.underrun = false;

        pa_memblockq_drop(c->input_memblockq, chunk->length);
        m = pa_memblockq_pop_missing(c->input_memblockq);

        /* NOTE(review): request data only when the atomic missing counter was
         * not already positive — presumably a request is already in flight
         * then; confirm pa_atomic_add() returns the previous value. */
        if (m > 0)
            if (pa_atomic_add(&c->playback.missing, (int) m) <= 0)
                pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(c), CONNECTION_MESSAGE_REQUEST_DATA, NULL, 0, NULL, NULL);

        return 0;
    }
}
/* Called from I/O thread context */
/*
 * Rewind callback of the LADSPA sink input: propagate the master's rewind to
 * our own sink and memblockq, and reset the plugin instances, because the
 * audio already processed through them is being thrown away.
 */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        size_t max_rewrite;

        /* We can rewrite what the master rewinds plus what we still hold queued */
        max_rewrite = nbytes + pa_memblockq_get_length(u->memblockq);
        amount = PA_MIN(u->sink->thread_info.rewind_nbytes, max_rewrite);
        u->sink->thread_info.rewind_nbytes = 0;

        if (amount > 0) {
            unsigned c;

            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, TRUE);

            pa_log_debug("Resetting plugin");

            /* Reset the plugin */
            if (u->descriptor->deactivate)
                for (c = 0; c < (u->channels / u->max_ladspaport_count); c++)
                    u->descriptor->deactivate(u->handle[c]);
            if (u->descriptor->activate)
                for (c = 0; c < (u->channels / u->max_ladspaport_count); c++)
                    u->descriptor->activate(u->handle[c]);
        }
    }

    pa_sink_process_rewind(u->sink, amount);
    pa_memblockq_rewind(u->memblockq, nbytes);
}
/* Called from I/O thread context */
/*
 * Rewind callback of the virtual surround sink input.  Byte counts coming
 * from the master are scaled by sink_fs/fs to translate between the two
 * frame sizes (assumes sink_fs is our sink's frame size, fs the stream's —
 * TODO confirm against the module's setup code).  On an actual rewind the
 * convolution history buffer is cleared, since the folded samples no longer
 * match the audio that will be replayed.
 */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        size_t max_rewrite;

        /* Translate master-side byte counts into our frame size */
        max_rewrite = nbytes * u->sink_fs / u->fs + pa_memblockq_get_length(u->memblockq);
        amount = PA_MIN(u->sink->thread_info.rewind_nbytes * u->sink_fs / u->fs, max_rewrite);
        u->sink->thread_info.rewind_nbytes = 0;

        if (amount > 0) {
            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, true);

            /* Reset the input buffer */
            memset(u->input_buffer, 0, u->hrir_samples * u->sink_fs);
            u->input_buffer_offset = 0;
        }
    }

    pa_sink_process_rewind(u->sink, amount);
    pa_memblockq_rewind(u->memblockq, nbytes * u->sink_fs / u->fs);
}
/* Return a pointer to the current write position inside the ring buffer and
 * store in *count how many bytes may be written contiguously: limited both
 * by the distance to the physical end of the buffer and by the space not yet
 * consumed by the reader (the atomically read fill count). */
static void *pa_ringbuffer_begin_write(pa_ringbuffer *r, int *count) {
    int used = pa_atomic_load(r->count);
    int until_end = r->capacity - r->writeindex;
    int until_full = r->capacity - used;

    *count = PA_MIN(until_end, until_full);

    return r->memory + r->writeindex;
}
/* Handle a rewind request queued on our sink: drop as much as possible of
 * the locally buffered memchunk (never more than what is buffered) and then
 * complete the rewind on the sink. */
static void process_rewind(struct userdata *u) {
    size_t n;

    pa_assert(u);

    /* A sink that is not opened has nothing to rewind */
    if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
        pa_sink_process_rewind(u->sink, 0);
        return;
    }

    n = u->sink->thread_info.rewind_nbytes;

    if (n > 0) {
        pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) n);

        /* We cannot drop more than what is still buffered locally */
        n = PA_MIN(u->memchunk.length, n);

        u->memchunk.length -= n;
        if (u->memchunk.length <= 0 && u->memchunk.memblock) {
            pa_memblock_unref(u->memchunk.memblock);
            pa_memchunk_reset(&u->memchunk);
        }

        pa_log_debug("Rewound %lu bytes.", (unsigned long) n);
    }

    pa_sink_process_rewind(u->sink, n);
}
/* Called from output thread context */
/*
 * Pop callback: first drain any pending messages on our async queue (with
 * u->in_pop set while doing so — NOTE(review): the flag's consumers are
 * outside this view), then serve up to 'nbytes' from the memblockq.
 * Returns 0 on success, -1 if nothing could be peeked.
 */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_sink_input_assert_io_context(i);
    pa_assert_se(u = i->userdata);
    pa_assert(chunk);

    u->in_pop = TRUE;
    while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0)
        ;
    u->in_pop = FALSE;

    if (pa_memblockq_peek(u->memblockq, chunk) < 0) {
        pa_log_info("Could not peek into queue");
        return -1;
    }

    /* Never hand out more than was asked for */
    chunk->length = PA_MIN(chunk->length, nbytes);
    pa_memblockq_drop(u->memblockq, chunk->length);

    update_min_memblockq_length(u);

    return 0;
}
/*
 * Create a new srbchannel: a pair of ring buffers (one read, one write)
 * plus two shared fdsems for signalling, all laid out inside a single
 * memblock allocated from pool 'p'.  An input I/O event for the read
 * direction is registered on mainloop 'm'.  Returns NULL on failure.
 */
pa_srbchannel* pa_srbchannel_new(pa_mainloop_api *m, pa_mempool *p) {
    int capacity;
    int readfd;
    struct srbheader *srh;

    pa_srbchannel* sr = pa_xmalloc0(sizeof(pa_srbchannel));
    sr->mainloop = m;
    sr->memblock = pa_memblock_new_pool(p, -1);
    if (!sr->memblock)
        goto fail;

    srh = pa_memblock_acquire(sr->memblock);
    pa_zero(*srh);

    /* The read buffer starts right after the (aligned) header */
    sr->rb_read.memory = (uint8_t*) srh + PA_ALIGN(sizeof(*srh));
    srh->readbuf_offset = sr->rb_read.memory - (uint8_t*) srh;

    /* Split the remaining space evenly between the two ring buffers */
    capacity = (pa_memblock_get_length(sr->memblock) - srh->readbuf_offset) / 2;

    sr->rb_write.memory = PA_ALIGN_PTR(sr->rb_read.memory + capacity);
    srh->writebuf_offset = sr->rb_write.memory - (uint8_t*) srh;

    /* Aligning the write buffer may have shrunk the usable read area */
    capacity = PA_MIN(capacity, srh->writebuf_offset - srh->readbuf_offset);

    pa_log_debug("SHM block is %d bytes, ringbuffer capacity is 2 * %d bytes", (int) pa_memblock_get_length(sr->memblock), capacity);

    srh->capacity = sr->rb_read.capacity = sr->rb_write.capacity = capacity;

    sr->rb_read.count = &srh->read_count;
    sr->rb_write.count = &srh->write_count;

    sr->sem_read = pa_fdsem_new_shm(&srh->read_semdata);
    if (!sr->sem_read)
        goto fail;

    sr->sem_write = pa_fdsem_new_shm(&srh->write_semdata);
    if (!sr->sem_write)
        goto fail;

    readfd = pa_fdsem_get(sr->sem_read);

#ifdef DEBUG_SRBCHANNEL
    pa_log("Enabling io event on fd %d", readfd);
#endif

    sr->read_event = m->io_new(m, readfd, PA_IO_EVENT_INPUT, semread_cb, sr);
    m->io_enable(sr->read_event, PA_IO_EVENT_INPUT);

    return sr;

fail:
    /* pa_srbchannel_free() copes with the partially initialized object */
    pa_srbchannel_free(sr);

    return NULL;
}
/*
 * Build a human-readable backtrace string of at most 'show_nframes' frames,
 * skipping the first 'skip_backtrace' frames.  The result looks like
 * " (a.c<<b.c<<c.c)"; ownership passes to the caller (allocated with
 * pa_xnew).  Returns NULL when no backtrace is available.
 */
static char* get_backtrace(unsigned show_nframes) {
    void* trace[32];
    int n_frames;
    char **symbols, *e, *r;
    unsigned j, n, s;
    size_t a;

    pa_assert(show_nframes > 0);

    n_frames = backtrace(trace, PA_ELEMENTSOF(trace));
    if (n_frames <= 0)
        return NULL;

    symbols = backtrace_symbols(trace, n_frames);
    if (!symbols)
        return NULL;

    s = skip_backtrace;
    n = PA_MIN((unsigned) n_frames, s + show_nframes);

    /* First pass: size the buffer — 4 covers " (", ")" and the NUL; each
     * subsequent frame adds a 2-byte "<<" separator */
    a = 4;
    for (j = s; j < n; j++) {
        if (j > s)
            a += 2;
        a += strlen(pa_path_get_filename(symbols[j]));
    }

    /* Second pass: assemble the string */
    r = pa_xnew(char, a);
    strcpy(r, " (");
    e = r + 2;
    for (j = s; j < n; j++) {
        const char *sym;

        if (j > s) {
            strcpy(e, "<<");
            e += 2;
        }

        sym = pa_path_get_filename(symbols[j]);
        strcpy(e, sym);
        e += strlen(sym);
    }
    strcpy(e, ")");

    /* backtrace_symbols() result is a single malloc'd blob */
    free(symbols);

    return r;
}
/* Called from I/O thread context */
/* Forward a pending rewind request of our sink to pa_sink_process_rewind(),
 * clamped to the number of bytes the master allows us to rewrite. */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t to_rewind = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        /* Never rewind more than the master can rewrite */
        to_rewind = PA_MIN(u->sink->thread_info.rewind_nbytes, nbytes);
        u->sink->thread_info.rewind_nbytes = 0;
    }

    pa_sink_process_rewind(u->sink, to_rewind);
}
/* Called from I/O thread context */
/* Forward a pending rewind request of our sink to pa_sink_process_rewind(),
 * clamped to what the master allows — but only once the sink is linked. */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t to_rewind = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    /* Nothing to do while our sink has not been linked yet */
    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state))
        return;

    if (u->sink->thread_info.rewind_nbytes > 0) {
        to_rewind = PA_MIN(u->sink->thread_info.rewind_nbytes, nbytes);
        u->sink->thread_info.rewind_nbytes = 0;
    }

    pa_sink_process_rewind(u->sink, to_rewind);
}
/* Allocate a memblock from 'pool', fill it entirely with the byte 'c' and
 * mark it as silence.  The block is made as large as the pool allows, capped
 * at PA_SILENCE_MAX. */
static pa_memblock *silence_memblock_new(pa_mempool *pool, uint8_t c) {
    pa_memblock *block;
    size_t n;
    void *p;

    pa_assert(pool);

    n = PA_MIN(pa_mempool_block_size_max(pool), PA_SILENCE_MAX);

    block = pa_memblock_new(pool, n);

    p = pa_memblock_acquire(block);
    memset(p, c, n);
    pa_memblock_release(block);

    pa_memblock_set_is_silence(block, true);

    return block;
}
/*
 * Pop callback of the memblockq stream: serve up to 'nbytes' from the queue.
 * When the queue runs dry the stream schedules its own unlinking (once it is
 * safe to remove) and returns -1.
 */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    memblockq_stream *u;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    u = MEMBLOCKQ_STREAM(i->userdata);
    memblockq_stream_assert_ref(u);

    /* Queue already torn down by an earlier underrun */
    if (!u->memblockq)
        return -1;

    if (pa_memblockq_peek(u->memblockq, chunk) < 0) {

        if (pa_sink_input_safe_to_remove(i)) {
            pa_memblockq_free(u->memblockq);
            u->memblockq = NULL;

            /* Tell the main thread to unlink us */
            pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(u), MEMBLOCKQ_STREAM_MESSAGE_UNLINK, NULL, 0, NULL, NULL);
        }

        return -1;
    }

    /* If there's no memblock, there's going to be data in the memblockq after
     * a gap with length chunk->length. Drop the gap and peek the actual
     * data. There should always be some data coming - hence the assert. The
     * gap will occur if the memblockq is rewound beyond index 0. */
    if (!chunk->memblock) {
        pa_memblockq_drop(u->memblockq, chunk->length);
        pa_assert_se(pa_memblockq_peek(u->memblockq, chunk) >= 0);
    }

    chunk->length = PA_MIN(chunk->length, nbytes);
    pa_memblockq_drop(u->memblockq, chunk->length);

    return 0;
}
/*
 * Post audio from the looping template memchunk to the source until the
 * stream timestamp has caught up with 'now' plus one block.  u->peek_index
 * tracks the current read position inside u->memchunk and wraps at its end.
 */
static void process_render(struct userdata *u, pa_usec_t now) {
    pa_assert(u);

    while (u->timestamp < now + u->block_usec) {
        pa_memchunk chunk;
        size_t k;

        /* Bytes still owed for this period */
        k = pa_usec_to_bytes_round_up(now + u->block_usec - u->timestamp, &u->source->sample_spec);

        /* Post a window of the template chunk starting at peek_index */
        chunk = u->memchunk;
        chunk.index += u->peek_index;
        chunk.length = PA_MIN(chunk.length - u->peek_index, k);

/*         pa_log_debug("posting %lu", (unsigned long) chunk.length); */
        pa_source_post(u->source, &chunk);

        /* Advance and wrap the read position */
        u->peek_index += chunk.length;
        while (u->peek_index >= u->memchunk.length)
            u->peek_index -= u->memchunk.length;

        u->timestamp += pa_bytes_to_usec(chunk.length, &u->source->sample_spec);
    }
}
/* Called from I/O thread context */
/*
 * Rewind callback of the filter sink template: propagate the master's
 * rewind to our sink — at most what the master rewinds plus what we still
 * have queued — then rewind our own memblockq.
 */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        size_t max_rewrite;

        /* We can rewrite what the master rewinds plus what we still hold queued */
        max_rewrite = nbytes + pa_memblockq_get_length(u->memblockq);
        amount = PA_MIN(u->sink->thread_info.rewind_nbytes, max_rewrite);
        u->sink->thread_info.rewind_nbytes = 0;

        if (amount > 0) {
            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, TRUE);

            /* (5) PUT YOUR CODE HERE TO RESET YOUR FILTER  */
        }
    }

    pa_sink_process_rewind(u->sink, amount);
    pa_memblockq_rewind(u->memblockq, nbytes);
}
int main(int argc, char*argv[]) { pid_t pid; int fd = -1; int ret = 1, i; struct sockaddr_un sa; char *ibuf = NULL; char *obuf = NULL; size_t buf_size, ibuf_size, ibuf_index, ibuf_length, obuf_size, obuf_index, obuf_length; char *cli; bool ibuf_eof, obuf_eof, ibuf_closed, obuf_closed; struct pollfd pollfd[3]; struct pollfd *watch_socket, *watch_stdin, *watch_stdout; int stdin_type = 0, stdout_type = 0, fd_type = 0; char *bn = NULL; int c; static const struct option long_options[] = { {"version", 0, NULL, ARG_VERSION}, {"help", 0, NULL, 'h'}, {NULL, 0, NULL, 0} }; setlocale(LC_ALL, ""); #ifdef ENABLE_NLS bindtextdomain(GETTEXT_PACKAGE, PULSE_LOCALEDIR); #endif bn = pa_path_get_filename(argv[0]); while ((c = getopt_long(argc, argv, "h", long_options, NULL)) != -1) { switch (c) { case 'h' : help(bn); ret = 0; goto quit; case ARG_VERSION: printf(_("pacmd %s\n" "Compiled with libpulse %s\n" "Linked with libpulse %s\n"), PACKAGE_VERSION, pa_get_headers_version(), pa_get_library_version()); ret = 0; goto quit; default: goto quit; } } if (pa_pid_file_check_running(&pid, "pulseaudio") < 0) { pa_log(_("No PulseAudio daemon running, or not running as session daemon.")); goto quit; } if ((fd = pa_socket_cloexec(PF_UNIX, SOCK_STREAM, 0)) < 0) { pa_log(_("socket(PF_UNIX, SOCK_STREAM, 0): %s"), strerror(errno)); goto quit; } pa_zero(sa); sa.sun_family = AF_UNIX; if (!(cli = pa_runtime_path("cli"))) goto quit; pa_strlcpy(sa.sun_path, cli, sizeof(sa.sun_path)); pa_xfree(cli); for (i = 0; i < 5; i++) { int r; if ((r = connect(fd, (struct sockaddr*) &sa, sizeof(sa))) < 0 && (errno != ECONNREFUSED && errno != ENOENT)) { pa_log(_("connect(): %s"), strerror(errno)); goto quit; } if (r >= 0) break; if (pa_pid_file_kill(SIGUSR2, NULL, "pulseaudio") < 0) { pa_log(_("Failed to kill PulseAudio daemon.")); goto quit; } pa_msleep(300); } if (i >= 5) { pa_log(_("Daemon not responding.")); goto quit; } buf_size = pa_pipe_buf(fd); ibuf_size = PA_MIN(buf_size, pa_pipe_buf(STDIN_FILENO)); ibuf = 
pa_xmalloc(ibuf_size); obuf_size = PA_MIN(buf_size, pa_pipe_buf(STDOUT_FILENO)); obuf = pa_xmalloc(obuf_size); ibuf_index = ibuf_length = obuf_index = obuf_length = 0; ibuf_eof = obuf_eof = ibuf_closed = obuf_closed = false; if (argc > 1) { for (i = 1; i < argc; i++) { size_t k; k = PA_MIN(ibuf_size - ibuf_length, strlen(argv[i])); memcpy(ibuf + ibuf_length, argv[i], k); ibuf_length += k; if (ibuf_length < ibuf_size) { ibuf[ibuf_length] = i < argc-1 ? ' ' : '\n'; ibuf_length++; } } ibuf_eof = true; } if (!ibuf_eof && isatty(STDIN_FILENO)) { /* send hello to enable interactive mode (welcome message, prompt) */ if (pa_write(fd, "hello\n", 6, &fd_type) < 0) { pa_log(_("write(): %s"), strerror(errno)); goto quit; } } for (;;) { struct pollfd *p; if (ibuf_eof && obuf_eof && ibuf_length <= 0 && obuf_length <= 0) break; if (ibuf_length <= 0 && ibuf_eof && !ibuf_closed) { shutdown(fd, SHUT_WR); ibuf_closed = true; } if (obuf_length <= 0 && obuf_eof && !obuf_closed) { shutdown(fd, SHUT_RD); obuf_closed = true; } pa_zero(pollfd); p = pollfd; if (ibuf_length > 0 || (!obuf_eof && obuf_length <= 0)) { watch_socket = p++; watch_socket->fd = fd; watch_socket->events = (ibuf_length > 0 ? POLLOUT : 0) | (!obuf_eof && obuf_length <= 0 ? 
POLLIN : 0); } else watch_socket = NULL; if (!ibuf_eof && ibuf_length <= 0) { watch_stdin = p++; watch_stdin->fd = STDIN_FILENO; watch_stdin->events = POLLIN; } else watch_stdin = NULL; if (obuf_length > 0) { watch_stdout = p++; watch_stdout->fd = STDOUT_FILENO; watch_stdout->events = POLLOUT; } else watch_stdout = NULL; if (pa_poll(pollfd, p-pollfd, -1) < 0) { if (errno == EINTR) continue; pa_log(_("poll(): %s"), strerror(errno)); goto quit; } if (watch_stdin) { if (watch_stdin->revents & POLLIN) { ssize_t r; pa_assert(ibuf_length <= 0); if ((r = pa_read(STDIN_FILENO, ibuf, ibuf_size, &stdin_type)) <= 0) { if (r < 0) { pa_log(_("read(): %s"), strerror(errno)); goto quit; } ibuf_eof = true; } else { ibuf_length = (size_t) r; ibuf_index = 0; } } else if (watch_stdin->revents & POLLHUP) ibuf_eof = true; } if (watch_socket) { if (watch_socket->revents & POLLIN) { ssize_t r; pa_assert(obuf_length <= 0); if ((r = pa_read(fd, obuf, obuf_size, &fd_type)) <= 0) { if (r < 0) { pa_log(_("read(): %s"), strerror(errno)); goto quit; } obuf_eof = true; } else { obuf_length = (size_t) r; obuf_index = 0; } } else if (watch_socket->revents & POLLHUP) obuf_eof = true; } if (watch_stdout) { if (watch_stdout->revents & POLLHUP) { obuf_eof = true; obuf_length = 0; } else if (watch_stdout->revents & POLLOUT) { ssize_t r; pa_assert(obuf_length > 0); if ((r = pa_write(STDOUT_FILENO, obuf + obuf_index, obuf_length, &stdout_type)) < 0) { pa_log(_("write(): %s"), strerror(errno)); goto quit; } obuf_length -= (size_t) r; obuf_index += obuf_index; } } if (watch_socket) { if (watch_socket->revents & POLLHUP) { ibuf_eof = true; ibuf_length = 0; } if (watch_socket->revents & POLLOUT) { ssize_t r; pa_assert(ibuf_length > 0); if ((r = pa_write(fd, ibuf + ibuf_index, ibuf_length, &fd_type)) < 0) { pa_log(_("write(): %s"), strerror(errno)); goto quit; } ibuf_length -= (size_t) r; ibuf_index += obuf_index; } } } ret = 0; quit: if (fd >= 0) pa_close(fd); pa_xfree(obuf); pa_xfree(ibuf); return ret; }
/*
 * Create and start a PortAudio helper thread.
 *
 * @param self         Thread object to initialize (zeroed here).
 * @param threadFunc   Thread entry point.
 * @param threadArg    Argument passed to threadFunc.
 * @param waitForChild If non-zero, block until the child signals self->cond
 *                     (clearing parentWaiting); a positive value is also the
 *                     timeout in seconds.
 * @param rtSched      If non-zero, try to boost the thread to realtime
 *                     scheduling via BoostPriority().
 *
 * Returns paNoError on success, or an error code; on failure after the
 * thread was spawned, the thread is terminated.  PA_UNLESS/PA_ENSURE jump
 * to the 'error' label on failure.
 */
PaError PaUnixThread_New( PaUnixThread* self, void* (*threadFunc)( void* ), void* threadArg, PaTime waitForChild,
        int rtSched )
{
    PaError result = paNoError;
    pthread_attr_t attr;
    int started = 0;

    memset( self, 0, sizeof (PaUnixThread) );
    PaUnixMutex_Initialize( &self->mtx );
    PA_ASSERT_CALL( pthread_cond_init( &self->cond, NULL ), 0 );

    self->parentWaiting = 0 != waitForChild;

    /* Spawn thread */

/* Temporarily disabled since we should test during configuration for presence of required mman.h header */
#if 0
#if defined _POSIX_MEMLOCK && (_POSIX_MEMLOCK != -1)
    if( rtSched )
    {
        if( mlockall( MCL_CURRENT | MCL_FUTURE ) < 0 )
        {
            int savedErrno = errno;             /* In case errno gets overwritten */
            assert( savedErrno != EINVAL );     /* Most likely a programmer error */
            PA_UNLESS( (savedErrno == EPERM), paInternalError );
            PA_DEBUG(( "%s: Failed locking memory\n", __FUNCTION__ ));
        }
        else
            PA_DEBUG(( "%s: Successfully locked memory\n", __FUNCTION__ ));
    }
#endif
#endif

    PA_UNLESS( !pthread_attr_init( &attr ), paInternalError );
    /* Priority relative to other processes */
    PA_UNLESS( !pthread_attr_setscope( &attr, PTHREAD_SCOPE_SYSTEM ), paInternalError );

    PA_UNLESS( !pthread_create( &self->thread, &attr, threadFunc, threadArg ), paInternalError );
    started = 1;

    if( rtSched )
    {
#if 0
        if( self->useWatchdog )
        {
            int err;
            struct sched_param wdSpm = { 0 };
            /* Launch watchdog, watchdog sets callback thread priority */
            int prio = PA_MIN( self->rtPrio + 4, sched_get_priority_max( SCHED_FIFO ) );
            wdSpm.sched_priority = prio;

            PA_UNLESS( !pthread_attr_init( &attr ), paInternalError );
            PA_UNLESS( !pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED ), paInternalError );
            PA_UNLESS( !pthread_attr_setscope( &attr, PTHREAD_SCOPE_SYSTEM ), paInternalError );
            PA_UNLESS( !pthread_attr_setschedpolicy( &attr, SCHED_FIFO ), paInternalError );
            PA_UNLESS( !pthread_attr_setschedparam( &attr, &wdSpm ), paInternalError );

            if( (err = pthread_create( &self->watchdogThread, &attr, &WatchdogFunc, self )) )
            {
                PA_UNLESS( err == EPERM, paInternalError );
                /* Permission error, go on without realtime privileges */
                PA_DEBUG(( "Failed bumping priority\n" ));
            }
            else
            {
                int policy;
                self->watchdogRunning = 1;
                PA_ENSURE_SYSTEM( pthread_getschedparam( self->watchdogThread, &policy, &wdSpm ), 0 );
                /* Check if priority is right, policy could potentially differ from SCHED_FIFO (but that's alright) */
                if( wdSpm.sched_priority != prio )
                {
                    PA_DEBUG(( "Watchdog priority not set correctly (%d)\n", wdSpm.sched_priority ));
                    PA_ENSURE( paInternalError );
                }
            }
        }
        else
#endif
            PA_ENSURE( BoostPriority( self ) );

        {
            /* NOTE(review): result of this query is discarded — presumably a
             * leftover debugging aid; confirm before removing */
            int policy;
            struct sched_param spm;
            pthread_getschedparam(self->thread, &policy, &spm);
        }
    }

    if( self->parentWaiting )
    {
        PaTime till;
        struct timespec ts;
        int res = 0;
        PaTime now;

        PA_ENSURE( PaUnixMutex_Lock( &self->mtx ) );

        /* Wait for stream to be started */
        now = PaUtil_GetTime();
        till = now + waitForChild;

        while( self->parentWaiting && !res )
        {
            if( waitForChild > 0 )
            {
                /* Absolute deadline for pthread_cond_timedwait */
                ts.tv_sec = (time_t) floor( till );
                ts.tv_nsec = (long) ((till - floor( till )) * 1e9);
                res = pthread_cond_timedwait( &self->cond, &self->mtx.mtx, &ts );
            }
            else
            {
                res = pthread_cond_wait( &self->cond, &self->mtx.mtx );
            }
        }

        PA_ENSURE( PaUnixMutex_Unlock( &self->mtx ) );

        /* Timing out is the only acceptable wait failure */
        PA_UNLESS( !res || ETIMEDOUT == res, paInternalError );
        PA_DEBUG(( "%s: Waited for %g seconds for stream to start\n", __FUNCTION__, PaUtil_GetTime() - now ));
        if( ETIMEDOUT == res )
        {
            PA_ENSURE( paTimedOut );
        }
    }

end:
    return result;
error:
    if( started )
    {
        PaUnixThread_Terminate( self, 0, NULL );
    }
    goto end;
}
/* Called from I/O thread context */
/*
 * Pop callback of the virtual surround sink input: render multichannel
 * audio from our sink and fold it with the HRIR impulse response, producing
 * two output samples (left/right) per input frame into 'chunk'.  Byte
 * counts are translated between the two frame sizes via sink_fs/fs.
 * Returns 0 on success.
 */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;
    float *src, *dst;
    unsigned n;
    pa_memchunk tchunk;
    unsigned j, k, l;
    float sum_right, sum_left;
    float current_sample;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    /* Hmm, process any rewind request that might be queued up */
    pa_sink_process_rewind(u->sink, 0);

    while (pa_memblockq_peek(u->memblockq, &tchunk) < 0) {
        pa_memchunk nchunk;

        pa_sink_render(u->sink, nbytes * u->sink_fs / u->fs, &nchunk);
        pa_memblockq_push(u->memblockq, &nchunk);
        pa_memblock_unref(nchunk.memblock);
    }

    tchunk.length = PA_MIN(nbytes * u->sink_fs / u->fs, tchunk.length);
    pa_assert(tchunk.length > 0);

    /* Number of whole input frames available this round */
    n = (unsigned) (tchunk.length / u->sink_fs);

    pa_assert(n > 0);

    chunk->index = 0;
    chunk->length = n * u->fs;
    chunk->memblock = pa_memblock_new(i->sink->core->mempool, chunk->length);

    pa_memblockq_drop(u->memblockq, n * u->sink_fs);

    src = pa_memblock_acquire_chunk(&tchunk);
    dst = pa_memblock_acquire(chunk->memblock);

    for (l = 0; l < n; l++) {
        /* Store the newest frame into the circular history buffer at the
         * current offset */
        memcpy(((char*) u->input_buffer) + u->input_buffer_offset * u->sink_fs, ((char *) src) + l * u->sink_fs, u->sink_fs);

        sum_right = 0;
        sum_left = 0;

        /* fold the input buffer with the impulse response */
        for (j = 0; j < u->hrir_samples; j++) {
            for (k = 0; k < u->channels; k++) {
                current_sample = u->input_buffer[((u->input_buffer_offset + j) % u->hrir_samples) * u->channels + k];

                sum_left += current_sample * u->hrir_data[j * u->hrir_channels + u->mapping_left[k]];
                sum_right += current_sample * u->hrir_data[j * u->hrir_channels + u->mapping_right[k]];
            }
        }

        /* Clamp to the valid float sample range to avoid clipping overflow */
        dst[2 * l] = PA_CLAMP_UNLIKELY(sum_left, -1.0f, 1.0f);
        dst[2 * l + 1] = PA_CLAMP_UNLIKELY(sum_right, -1.0f, 1.0f);

        /* Move the write position backwards through the circular buffer */
        u->input_buffer_offset--;
        if (u->input_buffer_offset < 0)
            u->input_buffer_offset += u->hrir_samples;
    }

    pa_memblock_release(tchunk.memblock);
    pa_memblock_release(chunk->memblock);

    pa_memblock_unref(tchunk.memblock);

    return 0;
}
/* Called from I/O thread context */
/*
 * Pop callback of the filter sink template: render audio from our own sink,
 * apply the (placeholder) filter and pass the result to the master via
 * 'chunk'.  The numbered comments mark template extension points.
 * Returns 0 on success.
 */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;
    float *src, *dst;
    size_t fs;
    unsigned n, c;
    pa_memchunk tchunk;
    pa_usec_t current_latency PA_GCC_UNUSED;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    /* Hmm, process any rewind request that might be queued up */
    pa_sink_process_rewind(u->sink, 0);

    /* (1) IF YOU NEED A FIXED BLOCK SIZE USE
     * pa_memblockq_peek_fixed_size() HERE INSTEAD. NOTE THAT FILTERS
     * WHICH CAN DEAL WITH DYNAMIC BLOCK SIZES ARE HIGHLY
     * PREFERRED. */
    while (pa_memblockq_peek(u->memblockq, &tchunk) < 0) {
        pa_memchunk nchunk;

        pa_sink_render(u->sink, nbytes, &nchunk);
        pa_memblockq_push(u->memblockq, &nchunk);
        pa_memblock_unref(nchunk.memblock);
    }

    /* (2) IF YOU NEED A FIXED BLOCK SIZE, THIS NEXT LINE IS NOT
     * NECESSARY */
    tchunk.length = PA_MIN(nbytes, tchunk.length);
    pa_assert(tchunk.length > 0);

    fs = pa_frame_size(&i->sample_spec);
    n = (unsigned) (tchunk.length / fs);

    pa_assert(n > 0);

    chunk->index = 0;
    chunk->length = n*fs;
    chunk->memblock = pa_memblock_new(i->sink->core->mempool, chunk->length);

    pa_memblockq_drop(u->memblockq, chunk->length);

    src = pa_memblock_acquire_chunk(&tchunk);
    dst = pa_memblock_acquire(chunk->memblock);

    /* (3) PUT YOUR CODE HERE TO DO SOMETHING WITH THE DATA */

    /* As an example, copy input to output */
    for (c = 0; c < u->channels; c++) {
        pa_sample_clamp(PA_SAMPLE_FLOAT32NE,
                        dst+c, u->channels * sizeof(float),
                        src+c, u->channels * sizeof(float),
                        n);
    }

    pa_memblock_release(tchunk.memblock);
    pa_memblock_release(chunk->memblock);

    pa_memblock_unref(tchunk.memblock);

    /* (4) IF YOU NEED THE LATENCY FOR SOMETHING ACQUIRE IT LIKE THIS: */
    current_latency =
        /* Get the latency of the master sink */
        pa_sink_get_latency_within_thread(i->sink) +

        /* Add the latency internal to our sink input on top */
        pa_bytes_to_usec(pa_memblockq_get_length(i->thread_info.render_memblockq), &i->sink->sample_spec);

    return 0;
}
/*
 * I/O thread of the null source: periodically posts silence chunks sized to
 * the wall-clock time elapsed since the last post, then sleeps on the
 * rtpoll timer until the next block is due.
 */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    bool timer_elapsed = false;
    size_t max_block_size;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_thread_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    /* Largest chunk we can allocate, aligned to whole frames */
    max_block_size = pa_frame_align(pa_mempool_block_size_max(u->core->mempool), &u->source->sample_spec);
    u->timestamp = pa_rtclock_now();

    for (;;) {
        int ret;

        /* Generate some null data */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            pa_usec_t now;
            pa_memchunk chunk;

            now = pa_rtclock_now();

            /* Post silence covering the time elapsed since the last post,
             * but only after the timer actually fired */
            if (timer_elapsed && (chunk.length = pa_usec_to_bytes(now - u->timestamp, &u->source->sample_spec)) > 0) {

                chunk.length = PA_MIN(max_block_size, chunk.length);

                chunk.memblock = pa_memblock_new(u->core->mempool, chunk.length);
                chunk.index = 0;
                pa_silence_memchunk(&chunk, &u->source->sample_spec);
                pa_source_post(u->source, &chunk);
                pa_memblock_unref(chunk.memblock);

                u->timestamp += pa_bytes_to_usec(chunk.length, &u->source->sample_spec);
            }

            pa_rtpoll_set_timer_absolute(u->rtpoll, u->timestamp + u->block_usec);
        } else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        timer_elapsed = pa_rtpoll_timer_elapsed(u->rtpoll);

        if (ret == 0)
            goto finish;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
/* Called from IO thread context */
/*
 * Pop callback of the file stream: serve data from the memblockq, refilling
 * it from the sound file (via libsndfile) whenever it runs dry.  When the
 * file is exhausted the queue is torn down and the stream schedules its own
 * unlinking.  Returns 0 on success, -1 on end of stream.
 */
static int sink_input_pop_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
    file_stream *u;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    u = FILE_STREAM(i->userdata);
    file_stream_assert_ref(u);

    /* Queue already torn down — stream is finished */
    if (!u->memblockq)
        return -1;

    for (;;) {
        pa_memchunk tchunk;
        size_t fs;
        void *p;
        sf_count_t n;

        if (pa_memblockq_peek(u->memblockq, chunk) >= 0) {
            chunk->length = PA_MIN(chunk->length, length);
            pa_memblockq_drop(u->memblockq, chunk->length);
            return 0;
        }

        /* Nothing queued and no file left to read from */
        if (!u->sndfile)
            break;

        tchunk.memblock = pa_memblock_new(i->sink->core->mempool, length);
        tchunk.index = 0;

        p = pa_memblock_acquire(tchunk.memblock);

        /* readf_function reads whole frames; the raw fallback reads bytes */
        if (u->readf_function) {
            fs = pa_frame_size(&i->sample_spec);
            n = u->readf_function(u->sndfile, p, (sf_count_t) (length/fs));
        } else {
            fs = 1;
            n = sf_read_raw(u->sndfile, p, (sf_count_t) length);
        }

        pa_memblock_release(tchunk.memblock);

        /* End of file (or read error): close and fall through to teardown */
        if (n <= 0) {
            pa_memblock_unref(tchunk.memblock);

            sf_close(u->sndfile);
            u->sndfile = NULL;
            break;
        }

        tchunk.length = (size_t) n * fs;

        pa_memblockq_push(u->memblockq, &tchunk);
        pa_memblock_unref(tchunk.memblock);
    }

    if (pa_sink_input_safe_to_remove(i)) {
        pa_memblockq_free(u->memblockq);
        u->memblockq = NULL;

        /* Tell the main thread to unlink us */
        pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(u), FILE_STREAM_MESSAGE_UNLINK, NULL, 0, NULL, NULL);
    }

    return -1;
}
/*
 * I/O thread of the Solaris audio module: renders sink audio and writes it
 * to the DSP device (keeping no more than u->buffer_size bytes queued in
 * the driver), reads captured audio and posts it to the source, and sleeps
 * on the rtpoll in between.  Playback timing is smoothed via u->smoother.
 */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;
    int ret, err;
    audio_info_t info;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);
    pa_smoother_set_time_offset(u->smoother, pa_rtclock_now());

    for (;;) {
        /* Render some data and write it to the dsp */

        if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
            process_rewind(u);

        if (u->sink && PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            pa_usec_t xtime0, ysleep_interval, xsleep_interval;
            uint64_t buffered_bytes;

            err = ioctl(u->fd, AUDIO_GETINFO, &info);
            if (err < 0) {
                pa_log("AUDIO_GETINFO ioctl failed: %s", pa_cstrerror(errno));
                goto fail;
            }

            if (info.play.error) {
                pa_log_debug("buffer under-run!");

                /* Clear the driver's error flag and restart the smoother */
                AUDIO_INITINFO(&info);
                info.play.error = 0;
                if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0)
                    pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));

                pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
            }

            for (;;) {
                void *p;
                ssize_t w;
                size_t len;
                int write_type = 1;

                /*
                 * Since we cannot modify the size of the output buffer we fake it
                 * by not filling it more than u->buffer_size.
                 */
                xtime0 = pa_rtclock_now();
                buffered_bytes = get_playback_buffered_bytes(u);
                if (buffered_bytes >= (uint64_t)u->buffer_size)
                    break;

                /* Only write whole frames, and only if it's worth a syscall */
                len = u->buffer_size - buffered_bytes;
                len -= len % u->frame_size;

                if (len < (size_t) u->minimum_request)
                    break;

                if (!u->memchunk.length)
                    pa_sink_render(u->sink, u->sink->thread_info.max_request, &u->memchunk);

                len = PA_MIN(u->memchunk.length, len);

                p = pa_memblock_acquire(u->memchunk.memblock);
                w = pa_write(u->fd, (uint8_t*) p + u->memchunk.index, len, &write_type);
                pa_memblock_release(u->memchunk.memblock);

                if (w <= 0) {
                    if (errno == EINTR) {
                        continue;
                    } else if (errno == EAGAIN) {
                        /* We may have realtime priority so yield the CPU to ensure that fd can become writable again. */
                        pa_log_debug("EAGAIN with %llu bytes buffered.", buffered_bytes);
                        break;
                    } else {
                        pa_log("Failed to write data to DSP: %s", pa_cstrerror(errno));
                        goto fail;
                    }
                } else {
                    pa_assert(w % u->frame_size == 0);

                    /* Consume the written part of the memchunk */
                    u->written_bytes += w;
                    u->memchunk.index += w;
                    u->memchunk.length -= w;

                    if (u->memchunk.length <= 0) {
                        pa_memblock_unref(u->memchunk.memblock);
                        pa_memchunk_reset(&u->memchunk);
                    }
                }
            }

            /* Sleep roughly until half the buffered audio has drained,
             * translated through the smoother into system time */
            ysleep_interval = pa_bytes_to_usec(buffered_bytes / 2, &u->sink->sample_spec);
            xsleep_interval = pa_smoother_translate(u->smoother, xtime0, ysleep_interval);
            pa_rtpoll_set_timer_absolute(u->rtpoll, xtime0 + PA_MIN(xsleep_interval, ysleep_interval));
        } else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Try to read some data and pass it on to the source driver */

        if (u->source && PA_SOURCE_IS_OPENED(u->source->thread_info.state) && (revents & POLLIN)) {
            pa_memchunk memchunk;
            void *p;
            ssize_t r;
            size_t len;

            err = ioctl(u->fd, AUDIO_GETINFO, &info);
            pa_assert(err >= 0);

            if (info.record.error) {
                pa_log_debug("buffer overflow!");

                /* Clear the driver's record error flag */
                AUDIO_INITINFO(&info);
                info.record.error = 0;
                if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0)
                    pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
            }

            /* How many bytes are ready to be read? */
            err = ioctl(u->fd, I_NREAD, &len);
            pa_assert(err >= 0);

            if (len > 0) {
                memchunk.memblock = pa_memblock_new(u->core->mempool, len);
                pa_assert(memchunk.memblock);

                p = pa_memblock_acquire(memchunk.memblock);
                r = pa_read(u->fd, p, len, NULL);
                pa_memblock_release(memchunk.memblock);

                if (r < 0) {
                    pa_memblock_unref(memchunk.memblock);
                    if (errno == EAGAIN)
                        break;
                    else {
                        pa_log("Failed to read data from DSP: %s", pa_cstrerror(errno));
                        goto fail;
                    }
                } else {
                    u->read_bytes += r;

                    memchunk.index = 0;
                    memchunk.length = r;

                    pa_source_post(u->source, &memchunk);
                    pa_memblock_unref(memchunk.memblock);

                    /* Input handled for this wakeup */
                    revents &= ~POLLIN;
                }
            }
        }

        if (u->rtpoll_item) {
            struct pollfd *pollfd;

            pa_assert(u->fd >= 0);

            /* Only wait for readability when capture is active */
            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
            pollfd->events = (u->source && PA_SOURCE_IS_OPENED(u->source->thread_info.state)) ? POLLIN : 0;
        }

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, true)) < 0)
            goto fail;

        if (ret == 0)
            goto finish;

        if (u->rtpoll_item) {
            struct pollfd *pollfd;

            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);

            /* Any event besides in/out means the device went away */
            if (pollfd->revents & ~(POLLOUT|POLLIN)) {
                pa_log("DSP shutdown.");
                goto fail;
            }

            revents = pollfd->revents;
        } else
            revents = 0;
    }

fail:
    /* We have to continue processing messages until we receive the
     * SHUTDOWN message */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}