/*
 * JNI bridge: blocks the calling Java thread on the native PulseAudio
 * threaded main loop until pa_threaded_mainloop_signal() is called.
 * The jlong handle carries the native pa_threaded_mainloop pointer.
 */
JNIEXPORT void JNICALL
Java_org_jitsi_impl_neomedia_pulseaudio_PA_threaded_1mainloop_1wait
    (JNIEnv *env, jclass clazz, jlong m)
{
    /* Recover the native main loop pointer from the Java long handle. */
    pa_threaded_mainloop *mainloop = (pa_threaded_mainloop *) (intptr_t) m;

    pa_threaded_mainloop_wait(mainloop);
}
/**
 * Initializes the PulseAudio host audio backend.
 *
 * Loads the PulseAudio shared library, creates the threaded main loop and
 * the context, starts the main loop thread and connects the context to the
 * default server, then waits (under the main loop lock) until the context
 * either becomes ready or reaches a terminal failure state.
 *
 * @returns VBox status code.
 * @param   pInterface  Host audio interface (unused).
 */
static DECLCALLBACK(int) drvHostPulseAudioInit(PPDMIHOSTAUDIO pInterface)
{
    NOREF(pInterface);
    LogFlowFuncEnter();

    int rc = audioLoadPulseLib();
    if (RT_FAILURE(rc))
    {
        LogRel(("PulseAudio: Failed to load the PulseAudio shared library! Error %Rrc\n", rc));
        return rc;
    }

    bool fLocked = false;
    do
    {
        if (!(g_pMainLoop = pa_threaded_mainloop_new()))
        {
            /* BUGFIX: no context exists yet on this path, so we must not
             * call pa_context_errno(g_pContext) (g_pContext is NULL). */
            LogRel(("PulseAudio: Failed to allocate main loop\n"));
            rc = VERR_NO_MEMORY;
            break;
        }

        if (!(g_pContext = pa_context_new(pa_threaded_mainloop_get_api(g_pMainLoop), "VirtualBox")))
        {
            /* BUGFIX: g_pContext is NULL here as well; querying its errno
             * was invalid. */
            LogRel(("PulseAudio: Failed to allocate context\n"));
            rc = VERR_NO_MEMORY;
            break;
        }

        if (pa_threaded_mainloop_start(g_pMainLoop) < 0)
        {
            LogRel(("PulseAudio: Failed to start threaded mainloop: %s\n",
                    pa_strerror(pa_context_errno(g_pContext))));
            rc = VERR_AUDIO_BACKEND_INIT_FAILED;
            break;
        }

        g_fAbortMainLoop = false;
        pa_context_set_state_callback(g_pContext, drvHostPulseAudioCbCtxState, NULL);

        /* All pa_threaded_mainloop_wait() calls must happen under the lock. */
        pa_threaded_mainloop_lock(g_pMainLoop);
        fLocked = true;

        if (pa_context_connect(g_pContext, NULL /* pszServer */, PA_CONTEXT_NOFLAGS, NULL) < 0)
        {
            LogRel(("PulseAudio: Failed to connect to server: %s\n",
                    pa_strerror(pa_context_errno(g_pContext))));
            rc = VERR_AUDIO_BACKEND_INIT_FAILED;
            break;
        }

        /* Wait until the g_pContext is ready; drvHostPulseAudioCbCtxState
         * signals the main loop on every state change. */
        for (;;)
        {
            if (!g_fAbortMainLoop)
                pa_threaded_mainloop_wait(g_pMainLoop);
            g_fAbortMainLoop = false;

            pa_context_state_t cstate = pa_context_get_state(g_pContext);
            if (cstate == PA_CONTEXT_READY)
                break;
            else if (   cstate == PA_CONTEXT_TERMINATED
                     || cstate == PA_CONTEXT_FAILED)
            {
                LogRel(("PulseAudio: Failed to initialize context (state %d)\n", cstate));
                rc = VERR_AUDIO_BACKEND_INIT_FAILED;
                break;
            }
        }
    } while (0);

    if (fLocked)
        pa_threaded_mainloop_unlock(g_pMainLoop);

    if (RT_FAILURE(rc))
    {
        /* Tear down in reverse order of construction. */
        if (g_pMainLoop)
            pa_threaded_mainloop_stop(g_pMainLoop);
        if (g_pContext)
        {
            pa_context_disconnect(g_pContext);
            pa_context_unref(g_pContext);
            g_pContext = NULL;
        }
        if (g_pMainLoop)
        {
            pa_threaded_mainloop_free(g_pMainLoop);
            g_pMainLoop = NULL;
        }
    }

    LogFlowFuncLeaveRC(rc);
    return rc;
}
/**
 * Writes audio data to the PulseAudio playback stream.
 *
 * Takes the threaded main loop lock and writes in chunks bounded by
 * pa_stream_writable_size(), blocking via pa_threaded_mainloop_wait()
 * whenever the server buffer is full, until the whole buffer is written
 * or an error occurs.  Errors are logged; short writes are reported.
 *
 * @param aubuf  buffer holding the audio bytes to write
 * @param size   number of bytes in aubuf
 */
void AudioOutputPulseAudio::WriteAudio(uchar *aubuf, int size)
{
    QString fn_log_tag = "WriteAudio, ";
    pa_stream_state_t sstate = pa_stream_get_state(pstream);

    VBAUDIOTS(fn_log_tag + QString("writing %1 bytes").arg(size));

    /* NB This "if" check can be replaced with PA_STREAM_IS_GOOD() in
       PulseAudio API from 0.9.11. As 0.9.10 is still widely used we use
       the more verbose version for now */
    if (sstate == PA_STREAM_CREATING || sstate == PA_STREAM_READY)
    {
        int write_status = PA_ERR_INVALID;
        size_t to_write = size;
        unsigned char *buf_ptr = aubuf;
        bool stream_bad = false;  // pa_stream_writable_size() reported error

        pa_threaded_mainloop_lock(mainloop);

        while (to_write > 0)
        {
            write_status = 0;
            size_t writable = pa_stream_writable_size(pstream);

            /* BUGFIX: pa_stream_writable_size() returns (size_t) -1 on
               error; the original "writable < 0" test could never fire on
               an unsigned type, so errors fell into the write branch. */
            if (writable == (size_t) -1)
            {
                stream_bad = true;
                break;
            }

            if (writable > 0)
            {
                size_t write = min(to_write, writable);
                write_status = pa_stream_write(pstream, buf_ptr, write, NULL,
                                               0, PA_SEEK_RELATIVE);
                if (!write_status)
                {
                    buf_ptr += write;
                    to_write -= write;
                }
                else
                    break;
            }
            else // writable == 0: wait for the server to drain its buffer
                pa_threaded_mainloop_wait(mainloop);
        }

        pa_threaded_mainloop_unlock(mainloop);

        if (to_write > 0)
        {
            if (stream_bad)
            {
                pa_context_state_t cstate = pa_context_get_state(pcontext);
                sstate = pa_stream_get_state(pstream);
                VBERROR(fn_log_tag +
                        QString("stream unfit for writing (writable < 0), "
                                "context state: %1, stream state: %2")
                        .arg(cstate,0,16).arg(sstate,0,16));
            }
            if (write_status != 0)
                VBERROR(fn_log_tag + QString("stream write failed: %1")
                        .arg(write_status == PA_ERR_BADSTATE
                             ? "PA_ERR_BADSTATE" : "PA_ERR_INVALID"));

            VBERROR(fn_log_tag + QString("short write, %1 of %2")
                    .arg(size - to_write).arg(size));
        }
    }
    else
        VBERROR(fn_log_tag + QString("stream state not good: %1")
                .arg(sstate,0,16));
}
/*
 * PulseAudioWrapper constructor.
 *
 * Creates and starts the PulseAudio threaded main loop, then — holding the
 * main loop lock — creates a context named after the application, connects
 * it, and spins on pa_context_get_state(), calling
 * pa_threaded_mainloop_wait() between checks, until the context reaches
 * READY, FAILED or TERMINATED.  Throws CallRecorderException if the context
 * did not become PA_CONTEXT_READY.
 *
 * Once connected it enumerates cards, sinks and sources (one
 * pa_context_get_*_info_list() call each, results delivered to the
 * onCardInfoList / onSinkInfoList / onSourceInfoList callbacks with the
 * private object as userdata), subscribes to card/sink/source change
 * events, and finally releases the main loop lock.
 *
 * NOTE(review): each list/subscribe operation is followed by a single
 * pa_threaded_mainloop_wait(); this assumes the corresponding callback
 * signals the main loop exactly once, after the full result list has been
 * delivered — confirm against the callback implementations.
 * NOTE(review): the state loop reads d->paContext while everything else
 * uses the static PulseAudioWrapperPrivate::paContext — presumably the
 * same object; verify.
 */
PulseAudioWrapper::PulseAudioWrapper(QObject *parent) : QObject(parent), d(new PulseAudioWrapperPrivate(this)) { PulseAudioWrapperPrivate::paMainLoop = pa_threaded_mainloop_new(); pa_threaded_mainloop_start(PulseAudioWrapperPrivate::paMainLoop); PulseAudioWrapperPrivate::paMainLoopApi = pa_threaded_mainloop_get_api(PulseAudioWrapperPrivate::paMainLoop); pa_threaded_mainloop_lock(PulseAudioWrapperPrivate::paMainLoop); PulseAudioWrapperPrivate::paContext = pa_context_new(PulseAudioWrapperPrivate::paMainLoopApi, qApp->applicationName().toUtf8().data()); pa_context_set_state_callback(PulseAudioWrapperPrivate::paContext, &PulseAudioWrapperPrivate::onContextNotify, NULL); pa_context_connect(PulseAudioWrapperPrivate::paContext, NULL, PA_CONTEXT_NOFLAGS, NULL); bool done = false; pa_context_state_t contextState; while (!done) { switch (contextState = pa_context_get_state(d->paContext)) { case PA_CONTEXT_UNCONNECTED: qDebug() << "Context state: PA_CONTEXT_UNCONNECTED"; break; case PA_CONTEXT_CONNECTING: qDebug() << "Context state: PA_CONTEXT_CONNECTING"; break; case PA_CONTEXT_AUTHORIZING: qDebug() << "Context state: PA_CONTEXT_AUTHORIZING"; break; case PA_CONTEXT_SETTING_NAME: qDebug() << "Context state: PA_CONTEXT_SETTING_NAME"; break; case PA_CONTEXT_READY: qDebug() << "Context state: PA_CONTEXT_READY"; done = true; break; case PA_CONTEXT_FAILED: qDebug() << "Context state: PA_CONTEXT_FAILED"; done = true; break; case PA_CONTEXT_TERMINATED: qDebug() << "Context state: PA_CONTEXT_TERMINATED"; done = true; break; } if (!done) pa_threaded_mainloop_wait(PulseAudioWrapperPrivate::paMainLoop); } if (contextState != PA_CONTEXT_READY) throw CallRecorderException("Unable to connect PulseAudio context!"); pa_operation* listCardsOp = pa_context_get_card_info_list(PulseAudioWrapperPrivate::paContext, &PulseAudioWrapperPrivate::onCardInfoList, d.data()); pa_threaded_mainloop_wait(PulseAudioWrapperPrivate::paMainLoop); pa_operation_unref(listCardsOp); pa_operation* listSinksOp = 
pa_context_get_sink_info_list(PulseAudioWrapperPrivate::paContext, &PulseAudioWrapperPrivate::onSinkInfoList, d.data()); pa_threaded_mainloop_wait(PulseAudioWrapperPrivate::paMainLoop); pa_operation_unref(listSinksOp); pa_operation* listSourcesOp = pa_context_get_source_info_list(PulseAudioWrapperPrivate::paContext, &PulseAudioWrapperPrivate::onSourceInfoList, d.data()); pa_threaded_mainloop_wait(PulseAudioWrapperPrivate::paMainLoop); pa_operation_unref(listSourcesOp); pa_context_set_subscribe_callback(PulseAudioWrapperPrivate::paContext, &PulseAudioWrapperPrivate::onContextSubscription, d.data()); pa_operation* subscriptionOp = pa_context_subscribe(PulseAudioWrapperPrivate::paContext, static_cast< pa_subscription_mask_t >( PA_SUBSCRIPTION_MASK_CARD | PA_SUBSCRIPTION_MASK_SINK | PA_SUBSCRIPTION_MASK_SOURCE), &PulseAudioWrapperPrivate::onContextSubscriptionSuccess, d.data()); pa_threaded_mainloop_wait(PulseAudioWrapperPrivate::paMainLoop); pa_operation_unref(subscriptionOp); pa_threaded_mainloop_unlock(PulseAudioWrapperPrivate::paMainLoop); }
/*
 * VLC PulseAudio output module entry point.
 *
 * Allocates aout_sys_t, maps the input channel count (1/2/4/6/8) onto VLC
 * physical channel masks, validates the sample spec (FLOAT32 native
 * endian), sizes the server buffers for roughly 200 ms total latency with
 * 20 ms minreq, then creates the threaded main loop + context, connects,
 * creates the stream and connects it for playback with interpolated/auto
 * timing and adjustable latency.  On success it publishes the negotiated
 * buffer metrics and the Play callback; on failure it unwinds via
 * unlock_and_fail / fail and returns VLC_EGENERIC.
 *
 * NOTE(review): both the context and the stream handshake use a single
 * pa_threaded_mainloop_wait() followed by one state check, rather than a
 * wait-loop on the state; this assumes the state callbacks signal only
 * when a decisive state has been reached — confirm against
 * context_state_cb / stream_state_cb.
 */
/***************************************************************************** * Open: open the audio device *****************************************************************************/ static int Open ( vlc_object_t *p_this ) { aout_instance_t *p_aout = (aout_instance_t *)p_this; struct aout_sys_t * p_sys; struct pa_sample_spec ss; const struct pa_buffer_attr *buffer_attr; struct pa_buffer_attr a; struct pa_channel_map map; /* Allocate structures */ p_aout->output.p_sys = p_sys = calloc( 1, sizeof( aout_sys_t ) ); if( p_sys == NULL ) return VLC_ENOMEM; PULSE_DEBUG( "Pulse start initialization"); ss.channels = aout_FormatNbChannels( &p_aout->output.output ); /* Get the input stream channel count */ /* Setup the pulse audio stream based on the input stream count */ switch(ss.channels) { case 8: p_aout->output.output.i_physical_channels = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT | AOUT_CHAN_CENTER | AOUT_CHAN_MIDDLELEFT | AOUT_CHAN_MIDDLERIGHT | AOUT_CHAN_REARLEFT | AOUT_CHAN_REARRIGHT | AOUT_CHAN_LFE; break; case 6: p_aout->output.output.i_physical_channels = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT | AOUT_CHAN_CENTER | AOUT_CHAN_REARLEFT | AOUT_CHAN_REARRIGHT | AOUT_CHAN_LFE; break; case 4: p_aout->output.output.i_physical_channels = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT | AOUT_CHAN_REARLEFT | AOUT_CHAN_REARRIGHT; break; case 2: p_aout->output.output.i_physical_channels = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT; break; case 1: p_aout->output.output.i_physical_channels = AOUT_CHAN_CENTER; break; default: msg_Err(p_aout,"Invalid number of channels"); goto fail; } /* Add a quick command line info message */ msg_Info(p_aout, "No. 
of Audio Channels: %d", ss.channels); ss.rate = p_aout->output.output.i_rate; ss.format = PA_SAMPLE_FLOAT32NE; p_aout->output.output.i_format = VLC_CODEC_FL32; if (!pa_sample_spec_valid(&ss)) { msg_Err(p_aout,"Invalid sample spec"); goto fail; } /* Reduce overall latency to 200mS to reduce audible clicks * Also pulse minreq and internal buffers are now 20mS which reduces resampling */ a.tlength = pa_bytes_per_second(&ss)/5; a.maxlength = a.tlength * 2; a.prebuf = a.tlength / 2; a.minreq = a.tlength / 10; /* Buffer size is 20mS */ p_sys->buffer_size = a.minreq; /* Initialise the speaker map setup above */ pa_channel_map_init_auto(&map, ss.channels, PA_CHANNEL_MAP_ALSA); if (!(p_sys->mainloop = pa_threaded_mainloop_new())) { msg_Err(p_aout, "Failed to allocate main loop"); goto fail; } if (!(p_sys->context = pa_context_new(pa_threaded_mainloop_get_api(p_sys->mainloop), _( PULSE_CLIENT_NAME )))) { msg_Err(p_aout, "Failed to allocate context"); goto fail; } pa_context_set_state_callback(p_sys->context, context_state_cb, p_aout); PULSE_DEBUG( "Pulse before context connect"); if (pa_context_connect(p_sys->context, NULL, 0, NULL) < 0) { msg_Err(p_aout, "Failed to connect to server: %s", pa_strerror(pa_context_errno(p_sys->context))); goto fail; } PULSE_DEBUG( "Pulse after context connect"); pa_threaded_mainloop_lock(p_sys->mainloop); if (pa_threaded_mainloop_start(p_sys->mainloop) < 0) { msg_Err(p_aout, "Failed to start main loop"); goto unlock_and_fail; } msg_Dbg(p_aout, "Pulse mainloop started"); /* Wait until the context is ready */ pa_threaded_mainloop_wait(p_sys->mainloop); if (pa_context_get_state(p_sys->context) != PA_CONTEXT_READY) { msg_Err(p_aout, "Failed to connect to server: %s", pa_strerror(pa_context_errno(p_sys->context))); goto unlock_and_fail; } if (!(p_sys->stream = pa_stream_new(p_sys->context, "audio stream", &ss, &map))) { msg_Err(p_aout, "Failed to create stream: %s", pa_strerror(pa_context_errno(p_sys->context))); goto unlock_and_fail; } 
PULSE_DEBUG( "Pulse after new stream"); pa_stream_set_state_callback(p_sys->stream, stream_state_cb, p_aout); pa_stream_set_write_callback(p_sys->stream, stream_request_cb, p_aout); pa_stream_set_latency_update_callback(p_sys->stream, stream_latency_update_cb, p_aout); if (pa_stream_connect_playback(p_sys->stream, NULL, &a, PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_AUTO_TIMING_UPDATE|PA_STREAM_ADJUST_LATENCY, NULL, NULL) < 0) { msg_Err(p_aout, "Failed to connect stream: %s", pa_strerror(pa_context_errno(p_sys->context))); goto unlock_and_fail; } PULSE_DEBUG("Pulse stream connect"); /* Wait until the stream is ready */ pa_threaded_mainloop_wait(p_sys->mainloop); msg_Dbg(p_aout,"Pulse stream connected"); if (pa_stream_get_state(p_sys->stream) != PA_STREAM_READY) { msg_Err(p_aout, "Failed to connect to server: %s", pa_strerror(pa_context_errno(p_sys->context))); goto unlock_and_fail; } PULSE_DEBUG("Pulse after stream get status"); pa_threaded_mainloop_unlock(p_sys->mainloop); buffer_attr = pa_stream_get_buffer_attr(p_sys->stream); p_aout->output.i_nb_samples = buffer_attr->minreq / pa_frame_size(&ss); p_aout->output.pf_play = Play; aout_VolumeSoftInit(p_aout); msg_Dbg(p_aout, "Pulse initialized successfully"); { char cmt[PA_CHANNEL_MAP_SNPRINT_MAX], sst[PA_SAMPLE_SPEC_SNPRINT_MAX]; msg_Dbg(p_aout, "Buffer metrics: maxlength=%u, tlength=%u, prebuf=%u, minreq=%u", buffer_attr->maxlength, buffer_attr->tlength, buffer_attr->prebuf, buffer_attr->minreq); msg_Dbg(p_aout, "Using sample spec '%s', channel map '%s'.", pa_sample_spec_snprint(sst, sizeof(sst), pa_stream_get_sample_spec(p_sys->stream)), pa_channel_map_snprint(cmt, sizeof(cmt), pa_stream_get_channel_map(p_sys->stream))); msg_Dbg(p_aout, "Connected to device %s (%u, %ssuspended).", pa_stream_get_device_name(p_sys->stream), pa_stream_get_device_index(p_sys->stream), pa_stream_is_suspended(p_sys->stream) ? 
"" : "not "); } return VLC_SUCCESS; unlock_and_fail: msg_Dbg(p_aout, "Pulse initialization unlock and fail"); if (p_sys->mainloop) pa_threaded_mainloop_unlock(p_sys->mainloop); fail: msg_Err(p_aout, "Pulse initialization failed"); uninit(p_aout); return VLC_EGENERIC; }
/*
 * pa_simple_new(): open a "simple API" connection to a PulseAudio server
 * and connect a playback or record stream on it.
 *
 * After argument validation it creates the threaded main loop and context,
 * connects the context, starts the main loop thread, and — holding the
 * main loop lock — loops on pa_context_get_state() (waiting between
 * checks) until the context is READY or leaves the GOOD set.  It then
 * creates the stream, connects it for playback or record with
 * interpolated timing / adjusted latency / auto timing updates, and loops
 * the same way on the stream state.  On any failure the PA error code is
 * captured in *rerror (if given) and the partially built object is torn
 * down via pa_simple_free().
 *
 * NOTE(review): there is no ';' after the last CHECK_VALIDITY_RETURN_ANY
 * invocation — presumably the macro expansion supplies its own terminator;
 * confirm against the macro definition.
 */
pa_simple* pa_simple_new( const char *server, const char *name, pa_stream_direction_t dir, const char *dev, const char *stream_name, const pa_sample_spec *ss, const pa_channel_map *map, const pa_buffer_attr *attr, int *rerror) { pa_simple *p; int error = PA_ERR_INTERNAL, r; CHECK_VALIDITY_RETURN_ANY(rerror, !server || *server, PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, dir == PA_STREAM_PLAYBACK || dir == PA_STREAM_RECORD, PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, !dev || *dev, PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, ss && pa_sample_spec_valid(ss), PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, !map || (pa_channel_map_valid(map) && map->channels == ss->channels), PA_ERR_INVALID, NULL) p = pa_xnew0(pa_simple, 1); p->direction = dir; if (!(p->mainloop = pa_threaded_mainloop_new())) goto fail; if (!(p->context = pa_context_new(pa_threaded_mainloop_get_api(p->mainloop), name))) goto fail; pa_context_set_state_callback(p->context, context_state_cb, p); if (pa_context_connect(p->context, server, 0, NULL) < 0) { error = pa_context_errno(p->context); goto fail; } pa_threaded_mainloop_lock(p->mainloop); if (pa_threaded_mainloop_start(p->mainloop) < 0) goto unlock_and_fail; for (;;) { pa_context_state_t state; state = pa_context_get_state(p->context); if (state == PA_CONTEXT_READY) break; if (!PA_CONTEXT_IS_GOOD(state)) { error = pa_context_errno(p->context); goto unlock_and_fail; } /* Wait until the context is ready */ pa_threaded_mainloop_wait(p->mainloop); } if (!(p->stream = pa_stream_new(p->context, stream_name, ss, map))) { error = pa_context_errno(p->context); goto unlock_and_fail; } pa_stream_set_state_callback(p->stream, stream_state_cb, p); pa_stream_set_read_callback(p->stream, stream_request_cb, p); pa_stream_set_write_callback(p->stream, stream_request_cb, p); pa_stream_set_latency_update_callback(p->stream, stream_latency_update_cb, p); if (dir == PA_STREAM_PLAYBACK) r = 
pa_stream_connect_playback(p->stream, dev, attr, PA_STREAM_INTERPOLATE_TIMING |PA_STREAM_ADJUST_LATENCY |PA_STREAM_AUTO_TIMING_UPDATE, NULL, NULL); else r = pa_stream_connect_record(p->stream, dev, attr, PA_STREAM_INTERPOLATE_TIMING |PA_STREAM_ADJUST_LATENCY |PA_STREAM_AUTO_TIMING_UPDATE); if (r < 0) { error = pa_context_errno(p->context); goto unlock_and_fail; } for (;;) { pa_stream_state_t state; state = pa_stream_get_state(p->stream); if (state == PA_STREAM_READY) break; if (!PA_STREAM_IS_GOOD(state)) { error = pa_context_errno(p->context); goto unlock_and_fail; } /* Wait until the stream is ready */ pa_threaded_mainloop_wait(p->mainloop); } pa_threaded_mainloop_unlock(p->mainloop); return p; unlock_and_fail: pa_threaded_mainloop_unlock(p->mainloop); fail: if (rerror) *rerror = error; pa_simple_free(p); return NULL; }
/*
 * GStreamer PulseAudio source: pull `length` bytes of captured audio into
 * `data`, blocking until that much is available.
 *
 * Runs under the threaded main loop lock.  It keeps a cursor into the
 * server-owned buffer returned by pa_stream_peek() (read_buffer /
 * read_buffer_length persist across calls so a partially consumed peek is
 * resumed on the next read).  When the peek buffer is exhausted it calls
 * pa_stream_drop() and peeks again, waiting on the main loop while no
 * data is available.  Pausing (pulsesrc->paused) or a dead stream aborts
 * the read; all error paths clear in_read, unlock, and return (guint)-1.
 */
static guint gst_pulsesrc_read (GstAudioSrc * asrc, gpointer data, guint length) { GstPulseSrc *pulsesrc = GST_PULSESRC_CAST (asrc); size_t sum = 0; pa_threaded_mainloop_lock (pulsesrc->mainloop); pulsesrc->in_read = TRUE; if (pulsesrc->paused) goto was_paused; while (length > 0) { size_t l; GST_LOG_OBJECT (pulsesrc, "reading %u bytes", length); /*check if we have a leftover buffer */ if (!pulsesrc->read_buffer) { for (;;) { if (gst_pulsesrc_is_dead (pulsesrc, TRUE)) goto unlock_and_fail; /* read all available data, we keep a pointer to the data and the length * and take from it what we need. */ if (pa_stream_peek (pulsesrc->stream, &pulsesrc->read_buffer, &pulsesrc->read_buffer_length) < 0) goto peek_failed; GST_LOG_OBJECT (pulsesrc, "have data of %" G_GSIZE_FORMAT " bytes", pulsesrc->read_buffer_length); /* if we have data, process if */ if (pulsesrc->read_buffer && pulsesrc->read_buffer_length) break; /* now wait for more data to become available */ GST_LOG_OBJECT (pulsesrc, "waiting for data"); pa_threaded_mainloop_wait (pulsesrc->mainloop); if (pulsesrc->paused) goto was_paused; } } l = pulsesrc->read_buffer_length > length ? 
length : pulsesrc->read_buffer_length; memcpy (data, pulsesrc->read_buffer, l); pulsesrc->read_buffer = (const guint8 *) pulsesrc->read_buffer + l; pulsesrc->read_buffer_length -= l; data = (guint8 *) data + l; length -= l; sum += l; if (pulsesrc->read_buffer_length <= 0) { /* we copied all of the data, drop it now */ if (pa_stream_drop (pulsesrc->stream) < 0) goto drop_failed; /* reset pointer to data */ pulsesrc->read_buffer = NULL; pulsesrc->read_buffer_length = 0; } } pulsesrc->in_read = FALSE; pa_threaded_mainloop_unlock (pulsesrc->mainloop); return sum; /* ERRORS */ was_paused: { GST_LOG_OBJECT (pulsesrc, "we are paused"); goto unlock_and_fail; } peek_failed: { GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("pa_stream_peek() failed: %s", pa_strerror (pa_context_errno (pulsesrc->context))), (NULL)); goto unlock_and_fail; } drop_failed: { GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("pa_stream_drop() failed: %s", pa_strerror (pa_context_errno (pulsesrc->context))), (NULL)); goto unlock_and_fail; } unlock_and_fail: { pulsesrc->in_read = FALSE; pa_threaded_mainloop_unlock (pulsesrc->mainloop); return (guint) - 1; } }
/*
 * eSpeak PulseAudio backend: open the connection and playback stream.
 *
 * Builds the sample spec from the module globals (ESPEAK_FORMAT,
 * wave_samplerate, ESPEAK_CHANNEL), creates the threaded main loop and an
 * "eSpeak" context, connects, starts the main loop, waits once for the
 * context to become READY, creates and connects the playback stream with
 * fixed buffer attributes, waits once for the stream to become READY, then
 * subscribes to sink-input events and loops on the subscribe operation
 * until it completes.  Returns PULSE_OK on success, PULSE_NO_CONNECTION
 * when the server could not be reached, PULSE_ERROR otherwise.
 *
 * Cleanup differs by error: on PULSE_NO_CONNECTION the context/main loop
 * are torn down inline (the main loop is stopped on the state-check path
 * before the unlock); other failures defer to pulse_close().
 *
 * NOTE(review): the context and stream handshakes use a single
 * pa_threaded_mainloop_wait() plus one state check; this assumes
 * context_state_cb / stream_state_cb signal only on decisive states —
 * confirm.
 */
static int pulse_open() { ENTER(__FUNCTION__); pa_sample_spec ss; pa_operation *o = NULL; int success; int ret = PULSE_ERROR; assert(!mainloop); assert(!context); assert(!stream); assert(!connected); pthread_mutex_init( &pulse_mutex, (const pthread_mutexattr_t *)NULL); ss.format = ESPEAK_FORMAT; ss.rate = wave_samplerate; ss.channels = ESPEAK_CHANNEL; if (!pa_sample_spec_valid(&ss)) return false; SHOW_TIME("pa_threaded_mainloop_new (call)"); if (!(mainloop = pa_threaded_mainloop_new())) { SHOW("Failed to allocate main loop\n",""); goto fail; } pa_threaded_mainloop_lock(mainloop); SHOW_TIME("pa_context_new (call)"); if (!(context = pa_context_new(pa_threaded_mainloop_get_api(mainloop), "eSpeak"))) { SHOW("Failed to allocate context\n",""); goto unlock_and_fail; } pa_context_set_state_callback(context, context_state_cb, NULL); pa_context_set_subscribe_callback(context, subscribe_cb, NULL); SHOW_TIME("pa_context_connect (call)"); if (pa_context_connect(context, NULL, (pa_context_flags_t)0, NULL) < 0) { SHOW("Failed to connect to server: %s", pa_strerror(pa_context_errno(context))); ret = PULSE_NO_CONNECTION; goto unlock_and_fail; } SHOW_TIME("pa_threaded_mainloop_start (call)"); if (pa_threaded_mainloop_start(mainloop) < 0) { SHOW("Failed to start main loop",""); goto unlock_and_fail; } /* Wait until the context is ready */ SHOW_TIME("pa_threaded_mainloop_wait"); pa_threaded_mainloop_wait(mainloop); if (pa_context_get_state(context) != PA_CONTEXT_READY) { SHOW("Failed to connect to server: %s", pa_strerror(pa_context_errno(context))); ret = PULSE_NO_CONNECTION; if (mainloop) pa_threaded_mainloop_stop(mainloop); goto unlock_and_fail; } SHOW_TIME("pa_stream_new"); if (!(stream = pa_stream_new(context, "unknown", &ss, NULL))) { SHOW("Failed to create stream: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } pa_stream_set_state_callback(stream, stream_state_cb, NULL); pa_stream_set_write_callback(stream, stream_request_cb, NULL); 
pa_stream_set_latency_update_callback(stream, stream_latency_update_cb, NULL); pa_buffer_attr a_attr; a_attr.maxlength = MAXLENGTH; a_attr.tlength = TLENGTH; a_attr.prebuf = PREBUF; a_attr.minreq = MINREQ; a_attr.fragsize = 0; SHOW_TIME("pa_connect_playback"); if (pa_stream_connect_playback(stream, NULL, &a_attr, (pa_stream_flags_t)(PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_AUTO_TIMING_UPDATE), NULL, NULL) < 0) { SHOW("Failed to connect stream: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } /* Wait until the stream is ready */ SHOW_TIME("pa_threaded_mainloop_wait"); pa_threaded_mainloop_wait(mainloop); if (pa_stream_get_state(stream) != PA_STREAM_READY) { SHOW("Failed to connect stream: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } /* Now subscribe to events */ SHOW_TIME("pa_context_subscribe"); if (!(o = pa_context_subscribe(context, PA_SUBSCRIPTION_MASK_SINK_INPUT, context_success_cb, &success))) { SHOW("pa_context_subscribe() failed: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } success = 0; SHOW_TIME("pa_threaded_mainloop_wait"); while (pa_operation_get_state(o) != PA_OPERATION_DONE) { CHECK_DEAD_GOTO(fail, 1); pa_threaded_mainloop_wait(mainloop); } pa_operation_unref(o); if (!success) { SHOW("pa_context_subscribe() failed: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } do_trigger = 0; written = 0; time_offset_msec = 0; just_flushed = 0; connected = 1; pa_threaded_mainloop_unlock(mainloop); SHOW_TIME("pulse_open (ret true)"); return PULSE_OK; unlock_and_fail: if (o) pa_operation_unref(o); pa_threaded_mainloop_unlock(mainloop); fail: // pulse_close(); if (ret == PULSE_NO_CONNECTION) { if (context) { SHOW_TIME("pa_context_disconnect (call)"); pa_context_disconnect(context); pa_context_unref(context); context = NULL; } if (mainloop) { SHOW_TIME("pa_threaded_mainloop_free (call)"); pa_threaded_mainloop_free(mainloop); mainloop = NULL; } } else { pulse_close(); } 
SHOW_TIME("pulse_open (ret false)"); return ret; }
/**
 * Creates a sydney-audio PCM playback stream backed by PulseAudio.
 *
 * Only SA_MODE_WRONLY + SA_PCM_FORMAT_S16_LE are supported.  Allocates the
 * stream object and its buffer list, starts a threaded main loop, creates
 * and connects a context, and waits (under the main loop lock) for the
 * context to become ready.
 *
 * @param _s           out: receives the new stream, NULL on failure
 * @param client_name  client name copied into the stream object
 * @param mode         must be SA_MODE_WRONLY
 * @param format       must be SA_PCM_FORMAT_S16_LE
 * @param rate         sample rate in Hz
 * @param n_channels   channel count
 * @return SA_SUCCESS, or an SA_ERROR_* code
 */
int
sa_stream_create_pcm(
  sa_stream_t      ** _s,
  const char       *  client_name,
  sa_mode_t           mode,
  sa_pcm_format_t     format,
  unsigned int        rate,
  unsigned int        n_channels
) {
  sa_stream_t * s = 0;
  char *server = NULL;

  /*
   * Make sure we return a NULL stream pointer on failure.
   */
  if (_s == NULL) {
    return SA_ERROR_INVALID;
  }
  *_s = NULL;

  if (mode != SA_MODE_WRONLY) {
    return SA_ERROR_NOT_SUPPORTED;
  }
  if (format != SA_PCM_FORMAT_S16_LE) {
    return SA_ERROR_NOT_SUPPORTED;
  }

  /*
   * Allocate the instance and required resources.
   */
  if ((s = malloc(sizeof(sa_stream_t))) == NULL) {
    return SA_ERROR_OOM;
  }
  if ((s->bl_head = new_buffer()) == NULL) {
    free(s);
    return SA_ERROR_OOM;
  }
  if (pthread_mutex_init(&s->mutex, NULL) != 0) {
    free(s->bl_head);
    free(s);
    return SA_ERROR_SYSTEM;
  }

  s->stream        = NULL;
  s->m             = NULL;
  s->context       = NULL;     /* so the failure path can test it safely */
  s->thread_id     = 0;
  s->playing       = 0;
  s->bytes_written = 0;
  s->bl_tail       = s->bl_head;
  s->n_bufs        = 1;

  s->sample_spec.format   = PA_SAMPLE_S16LE;
  s->sample_spec.channels = n_channels;
  s->sample_spec.rate     = rate;

  /* BUGFIX: the original strcpy() could overflow s->client_name for long
   * names.  NOTE(review): assumes client_name member is a char array so
   * sizeof gives its capacity — confirm against sa_stream_t. */
  snprintf(s->client_name, sizeof(s->client_name), "%s", client_name);

  /* Set up a new main loop and start its thread. */
  s->m = pa_threaded_mainloop_new();
  pa_threaded_mainloop_start(s->m);
  pa_threaded_mainloop_lock(s->m);

  /* Create a new connection context */
  if (!(s->context = pa_context_new(pa_threaded_mainloop_get_api(s->m),
                                    "OggPlay"))) {
    fprintf(stderr, "pa_context_new() failed.\n");
    goto unlock_and_fail;
  }
  pa_context_set_state_callback(s->context, context_state_callback, s);

  /* BUGFIX: the connect result was previously ignored; a failure here only
   * surfaced later as a confusing context-state error. */
  if (pa_context_connect(s->context, server, 0, NULL) < 0) {
    fprintf(stderr, "pa_context_connect() failed.\n");
    goto unlock_and_fail;
  }

  /* Wait until the context is ready (context_state_callback signals). */
  pa_threaded_mainloop_wait(s->m);
  if (pa_context_get_state(s->context) != PA_CONTEXT_READY) {
    fprintf(stderr, "creating Pulseaudio Context failed\n");
    goto unlock_and_fail;
  }
  pa_threaded_mainloop_unlock(s->m);

  *_s = s;
  return SA_SUCCESS;

unlock_and_fail:
  /* BUGFIX: the original only unlocked and free()d `s`, leaking the
   * context, main loop, mutex and buffer list. */
  pa_threaded_mainloop_unlock(s->m);
  pa_threaded_mainloop_stop(s->m);
  if (s->context != NULL) {
    pa_context_unref(s->context);
  }
  pa_threaded_mainloop_free(s->m);
  pthread_mutex_destroy(&s->mutex);
  free(s->bl_head);
  free(s);
  return SA_ERROR_OOM;
}
/*
 * Audacious PulseAudio output: open a playback stream.
 *
 * Maps the Audacious FMT_* sample format onto a PA sample format, then
 * creates the threaded main loop and an "Audacious" context, connects,
 * starts the main loop and waits once for the context to become READY.
 * It then creates the stream, connects it for playback with buffer sizes
 * derived from the "output_buffer_size" setting, waits once for the
 * stream to become READY, subscribes to sink-input events (looping on the
 * operation until done, with success reported through context_success_cb
 * into `success`), and finally fetches the initial sink-input info so the
 * volume state (volume_valid) is populated.  Returns TRUE on success;
 * on failure unrefs any pending operation, unlocks, and calls
 * pulse_close().
 *
 * NOTE(review): CHECK_DEAD_GOTO(fail, 1) inside the wait loops jumps to
 * `fail` without passing the unlock — presumably the macro or
 * pulse_close() copes with the held lock; confirm.
 * NOTE(review): buffer_size is pa_usec_to_bytes(aud_buffer, &ss) * 1000
 * with aud_buffer in ms; converting after the byte calculation matches
 * ms→µs only up to rounding — confirm intent.
 */
static int pulse_open(int fmt, int rate, int nch) { pa_sample_spec ss; pa_operation *o = NULL; int success; assert(!mainloop); assert(!context); assert(!stream); assert(!connected); switch(fmt) { case FMT_U8: ss.format = PA_SAMPLE_U8; break; case FMT_S16_LE: ss.format = PA_SAMPLE_S16LE; break; case FMT_S16_BE: ss.format = PA_SAMPLE_S16BE; break; #ifdef PA_SAMPLE_S24_32LE case FMT_S24_LE: ss.format = PA_SAMPLE_S24_32LE; break; case FMT_S24_BE: ss.format = PA_SAMPLE_S24_32BE; break; #endif #ifdef PA_SAMPLE_S32LE case FMT_S32_LE: ss.format = PA_SAMPLE_S32LE; break; case FMT_S32_BE: ss.format = PA_SAMPLE_S32BE; break; #endif case FMT_FLOAT: ss.format = PA_SAMPLE_FLOAT32NE; break; default: return FALSE; } ss.rate = rate; ss.channels = nch; if (!pa_sample_spec_valid(&ss)) return FALSE; if (!(mainloop = pa_threaded_mainloop_new())) { ERROR ("Failed to allocate main loop"); goto fail; } pa_threaded_mainloop_lock(mainloop); if (!(context = pa_context_new(pa_threaded_mainloop_get_api(mainloop), "Audacious"))) { ERROR ("Failed to allocate context"); goto unlock_and_fail; } pa_context_set_state_callback(context, context_state_cb, NULL); pa_context_set_subscribe_callback(context, subscribe_cb, NULL); if (pa_context_connect(context, NULL, 0, NULL) < 0) { ERROR ("Failed to connect to server: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } if (pa_threaded_mainloop_start(mainloop) < 0) { ERROR ("Failed to start main loop"); goto unlock_and_fail; } /* Wait until the context is ready */ pa_threaded_mainloop_wait(mainloop); if (pa_context_get_state(context) != PA_CONTEXT_READY) { ERROR ("Failed to connect to server: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } if (!(stream = pa_stream_new(context, "Audacious", &ss, NULL))) { ERROR ("Failed to create stream: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } pa_stream_set_state_callback(stream, stream_state_cb, NULL); pa_stream_set_write_callback(stream, 
stream_request_cb, NULL); pa_stream_set_latency_update_callback(stream, stream_latency_update_cb, NULL); /* Connect stream with sink and default volume */ /* Buffer struct */ int aud_buffer = aud_get_int(NULL, "output_buffer_size"); size_t buffer_size = pa_usec_to_bytes(aud_buffer, &ss) * 1000; pa_buffer_attr buffer = {(uint32_t) -1, buffer_size, (uint32_t) -1, (uint32_t) -1, buffer_size}; if (pa_stream_connect_playback(stream, NULL, &buffer, PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_AUTO_TIMING_UPDATE, NULL, NULL) < 0) { ERROR ("Failed to connect stream: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } /* Wait until the stream is ready */ pa_threaded_mainloop_wait(mainloop); if (pa_stream_get_state(stream) != PA_STREAM_READY) { ERROR ("Failed to connect stream: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } /* Now subscribe to events */ if (!(o = pa_context_subscribe(context, PA_SUBSCRIPTION_MASK_SINK_INPUT, context_success_cb, &success))) { ERROR ("pa_context_subscribe() failed: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } success = 0; while (pa_operation_get_state(o) != PA_OPERATION_DONE) { CHECK_DEAD_GOTO(fail, 1); pa_threaded_mainloop_wait(mainloop); } if (!success) { ERROR ("pa_context_subscribe() failed: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } pa_operation_unref(o); /* Now request the initial stream info */ if (!(o = pa_context_get_sink_input_info(context, pa_stream_get_index(stream), info_cb, NULL))) { ERROR ("pa_context_get_sink_input_info() failed: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } while (pa_operation_get_state(o) != PA_OPERATION_DONE) { CHECK_DEAD_GOTO(fail, 1); pa_threaded_mainloop_wait(mainloop); } if (!volume_valid) { ERROR ("pa_context_get_sink_input_info() failed: %s", pa_strerror(pa_context_errno(context))); goto unlock_and_fail; } pa_operation_unref(o); do_trigger = 0; written = 0; flush_time = 0; bytes_per_second = 
FMT_SIZEOF (fmt) * nch * rate; connected = 1; volume_time_event = NULL; pa_threaded_mainloop_unlock(mainloop); return TRUE; unlock_and_fail: if (o) pa_operation_unref(o); pa_threaded_mainloop_unlock(mainloop); fail: pulse_close(); return FALSE; }
/*
 * Showtime PulseAudio output loop.
 *
 * Creates and connects a per-mode context (with an application proplist on
 * PA API >= 12), subscribes the master volume / mute properties to the
 * main loop lock via prop_pa_lockmgr, then runs the playout loop while
 * holding the threaded main loop lock:
 *
 *  - dequeues the next audio buffer from the fifo (dropping the lock while
 *    blocked in af_deq2; AF_EXIT terminates the loop),
 *  - tears down and recreates the stream when the buffer's format, sample
 *    rate or float-ness changed (stream_destroy / stream_setup),
 *  - waits on the main loop while the stream is absent, UNCONNECTED or
 *    CREATING; on TERMINATED/FAILED it unrefs the stream, pushes a pause
 *    event with the saved stream_error to the media pipe and purges the
 *    fifo,
 *  - honours ab_flush via pa_stream_flush, and writes up to
 *    pa_stream_writable_size() bytes per iteration, waiting when the
 *    server buffer is full,
 *  - propagates the buffer's PTS minus pa_stream_get_latency() into the
 *    media-pipe audio clock under mp_clock_mutex.
 *
 * On exit it unsubscribes the volume/mute properties, destroys the stream,
 * unlocks the main loop, unrefs the context and frees any pending buffer.
 * Returns 0 on normal exit, -1 if the context failed or could not be
 * created/connected.
 */
static int pa_audio_start(audio_mode_t *am, audio_fifo_t *af) { pa_audio_mode_t *pam = (pa_audio_mode_t *)am; audio_buf_t *ab = NULL; size_t l, length; int64_t pts; media_pipe_t *mp; int r = 0; pa_threaded_mainloop_lock(mainloop); #if PA_API_VERSION >= 12 pa_proplist *pl = pa_proplist_new(); pa_proplist_sets(pl, PA_PROP_APPLICATION_ID, "com.lonelycoder.hts.showtime"); pa_proplist_sets(pl, PA_PROP_APPLICATION_NAME, "Showtime"); /* Create a new connection context */ pam->context = pa_context_new_with_proplist(api, "Showtime", pl); pa_proplist_free(pl); #else pam->context = pa_context_new(api, "Showtime"); #endif if(pam->context == NULL) { pa_threaded_mainloop_unlock(mainloop); return -1; } pa_context_set_state_callback(pam->context, context_state_callback, pam); /* Connect the context */ if(pa_context_connect(pam->context, NULL, 0, NULL) < 0) { TRACE(TRACE_ERROR, "PA", "pa_context_connect() failed: %s", pa_strerror(pa_context_errno(pam->context))); pa_threaded_mainloop_unlock(mainloop); return -1; } /* Need at least one packet of audio */ /* Subscribe to updates of master volume */ pam->sub_mvol = prop_subscribe(PROP_SUB_DIRECT_UPDATE, PROP_TAG_CALLBACK_FLOAT, set_mastervol, pam, PROP_TAG_ROOT, prop_mastervol, PROP_TAG_EXTERNAL_LOCK, mainloop, prop_pa_lockmgr, NULL); /* Subscribe to updates of master volume mute */ pam->sub_mute = prop_subscribe(PROP_SUB_DIRECT_UPDATE, PROP_TAG_CALLBACK_INT, set_mastermute, pam, PROP_TAG_ROOT, prop_mastermute, PROP_TAG_EXTERNAL_LOCK, mainloop, prop_pa_lockmgr, NULL); while(1) { if(ab == NULL) { pa_threaded_mainloop_unlock(mainloop); ab = af_deq2(af, 1, am); pa_threaded_mainloop_lock(mainloop); if(ab == AF_EXIT) { ab = NULL; break; } } if(pa_context_get_state(pam->context) == PA_CONTEXT_TERMINATED || pa_context_get_state(pam->context) == PA_CONTEXT_FAILED) { r = -1; break; } if(pam->stream != NULL && (pam->cur_format != ab->ab_format || pam->cur_rate != ab->ab_samplerate || pam->cur_isfloat != ab->ab_isfloat)) { stream_destroy(pam); } 
if(pam->stream == NULL && pa_context_get_state(pam->context) == PA_CONTEXT_READY) { /* Context is ready, but we don't have a stream yet, set it up */ stream_setup(pam, ab); } if(pam->stream == NULL) { pa_threaded_mainloop_wait(mainloop); continue; } switch(pa_stream_get_state(pam->stream)) { case PA_STREAM_UNCONNECTED: case PA_STREAM_CREATING: pa_threaded_mainloop_wait(mainloop); continue; case PA_STREAM_READY: break; case PA_STREAM_TERMINATED: case PA_STREAM_FAILED: pa_stream_unref(pam->stream); pam->stream = NULL; char msg[100]; snprintf(msg, sizeof(msg), "Audio stream disconnected from " "PulseAudio server -- %s.", pa_strerror(pam->stream_error)); mp_flush(ab->ab_mp, 0); mp_enqueue_event(ab->ab_mp, event_create_str(EVENT_INTERNAL_PAUSE, msg)); audio_fifo_purge(af, NULL, NULL); if(ab != NULL) { ab_free(ab); ab = NULL; } continue; } if(ab->ab_flush) { pa_operation *o; o = pa_stream_flush(pam->stream, NULL, NULL); if(o != NULL) pa_operation_unref(o); ab->ab_flush = 0; } l = pa_stream_writable_size(pam->stream); if(l == 0) { pa_threaded_mainloop_wait(mainloop); continue; } length = ab->ab_frames * pa_frame_size(&pam->ss) - ab->ab_tmp; if(l > length) l = length; if((pts = ab->ab_pts) != AV_NOPTS_VALUE && ab->ab_mp != NULL) { int64_t pts; pa_usec_t delay; pts = ab->ab_pts; ab->ab_pts = AV_NOPTS_VALUE; if(!pa_stream_get_latency(pam->stream, &delay, NULL)) { mp = ab->ab_mp; hts_mutex_lock(&mp->mp_clock_mutex); mp->mp_audio_clock = pts - delay; mp->mp_audio_clock_realtime = showtime_get_ts(); mp->mp_audio_clock_epoch = ab->ab_epoch; hts_mutex_unlock(&mp->mp_clock_mutex); } } pa_stream_write(pam->stream, ab->ab_data + ab->ab_tmp, l, NULL, 0LL, PA_SEEK_RELATIVE); ab->ab_tmp += l; assert(ab->ab_tmp <= ab->ab_frames * pa_frame_size(&pam->ss)); if(ab->ab_frames * pa_frame_size(&pam->ss) == ab->ab_tmp) { ab_free(ab); ab = NULL; } } prop_unsubscribe(pam->sub_mvol); prop_unsubscribe(pam->sub_mute); if(pam->stream != NULL) stream_destroy(pam); 
pa_threaded_mainloop_unlock(mainloop); pa_context_unref(pam->context); if(ab != NULL) { ab_free(ab); ab = NULL; } return r; }
static void *pulse_init(const char *device, unsigned rate, unsigned latency) { pa_sample_spec spec; memset(&spec, 0, sizeof(spec)); pa_buffer_attr buffer_attr = {0}; pa_t *pa = (pa_t*)calloc(1, sizeof(*pa)); if (!pa) goto error; pa->mainloop = pa_threaded_mainloop_new(); if (!pa->mainloop) goto error; pa->context = pa_context_new(pa_threaded_mainloop_get_api(pa->mainloop), "RetroArch"); if (!pa->context) goto error; pa_context_set_state_callback(pa->context, context_state_cb, pa); if (pa_context_connect(pa->context, device, PA_CONTEXT_NOFLAGS, NULL) < 0) goto error; pa_threaded_mainloop_lock(pa->mainloop); if (pa_threaded_mainloop_start(pa->mainloop) < 0) goto error; pa_threaded_mainloop_wait(pa->mainloop); if (pa_context_get_state(pa->context) != PA_CONTEXT_READY) goto unlock_error; spec.format = is_little_endian() ? PA_SAMPLE_FLOAT32LE : PA_SAMPLE_FLOAT32BE; spec.channels = 2; spec.rate = rate; pa->stream = pa_stream_new(pa->context, "audio", &spec, NULL); if (!pa->stream) goto unlock_error; pa_stream_set_state_callback(pa->stream, stream_state_cb, pa); pa_stream_set_write_callback(pa->stream, stream_request_cb, pa); pa_stream_set_latency_update_callback(pa->stream, stream_latency_update_cb, pa); buffer_attr.maxlength = -1; buffer_attr.tlength = pa_usec_to_bytes(latency * PA_USEC_PER_MSEC, &spec); buffer_attr.prebuf = -1; buffer_attr.minreq = -1; buffer_attr.fragsize = -1; pa->buffer_size = buffer_attr.tlength; if (pa_stream_connect_playback(pa->stream, NULL, &buffer_attr, PA_STREAM_ADJUST_LATENCY, NULL, NULL) < 0) goto error; pa_threaded_mainloop_wait(pa->mainloop); if (pa_stream_get_state(pa->stream) != PA_STREAM_READY) goto unlock_error; pa_threaded_mainloop_unlock(pa->mainloop); return pa; unlock_error: pa_threaded_mainloop_unlock(pa->mainloop); error: pulse_free(pa); return NULL; }
bool AudioOutputPulseAudio::ConnectPlaybackStream(void) { QString fn_log_tag = "ConnectPlaybackStream, "; pa_proplist *proplist = pa_proplist_new(); if (!proplist) { VBERROR(fn_log_tag + QString("failed to create new proplist")); return false; } pa_proplist_sets(proplist, PA_PROP_MEDIA_ROLE, "video"); pstream = pa_stream_new_with_proplist(pcontext, "MythTV playback", &sample_spec, &channel_map, proplist); if (!pstream) { VBERROR("failed to create new playback stream"); return false; } pa_stream_set_state_callback(pstream, StreamStateCallback, this); pa_stream_set_write_callback(pstream, WriteCallback, this); pa_stream_set_overflow_callback(pstream, BufferFlowCallback, (char*)"over"); pa_stream_set_underflow_callback(pstream, BufferFlowCallback, (char*)"under"); if (set_initial_vol) { int volume = gCoreContext->GetNumSetting("MasterMixerVolume", 80); pa_cvolume_set(&volume_control, channels, (float)volume * (float)PA_VOLUME_NORM / 100.0f); } else pa_cvolume_reset(&volume_control, channels); fragment_size = (samplerate * 25 * output_bytes_per_frame) / 1000; buffer_settings.maxlength = (uint32_t)-1; buffer_settings.tlength = fragment_size * 4; buffer_settings.prebuf = (uint32_t)-1; buffer_settings.minreq = (uint32_t)-1; int flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_NO_REMIX_CHANNELS; pa_stream_connect_playback(pstream, NULL, &buffer_settings, (pa_stream_flags_t)flags, NULL, NULL); pa_context_state_t cstate; pa_stream_state_t sstate; bool connected = false, failed = false; while (!(connected || failed)) { switch (cstate = pa_context_get_state(pcontext)) { case PA_CONTEXT_FAILED: case PA_CONTEXT_TERMINATED: VBERROR(QString("context is stuffed, %1") .arg(pa_strerror(pa_context_errno(pcontext)))); failed = true; break; default: switch (sstate = pa_stream_get_state(pstream)) { case PA_STREAM_READY: connected = true; break; case PA_STREAM_FAILED: case PA_STREAM_TERMINATED: VBERROR(QString("stream failed or was 
terminated, " "context state %1, stream state %2") .arg(cstate).arg(sstate)); failed = true; break; default: pa_threaded_mainloop_wait(mainloop); break; } } } const pa_buffer_attr *buf_attr = pa_stream_get_buffer_attr(pstream); fragment_size = buf_attr->tlength >> 2; soundcard_buffer_size = buf_attr->maxlength; VBAUDIO(QString("fragment size %1, soundcard buffer size %2") .arg(fragment_size).arg(soundcard_buffer_size)); return (connected && !failed); }
/**
 * Probe the default PulseAudio sink and build the list of supported
 * rates/channels/formats.
 *
 * Spins up a temporary mainloop + context, queries sink 0, then tears
 * everything down again. Returns a new AudioOutputSettings (owned by the
 * member m_aosettings) or NULL on failure.
 *
 * BUGFIX: the threaded mainloop was stopped but never freed before the
 * pointer was dropped/NULLed — pa_threaded_mainloop_new() must be paired
 * with pa_threaded_mainloop_free(). Also NULL out m_aosettings after
 * deleting it so the member never dangles.
 */
AudioOutputSettings* AudioOutputPulseAudio::GetOutputSettings(bool /*digital*/)
{
    AudioFormat fmt;
    m_aosettings = new AudioOutputSettings();
    QString fn_log_tag = "OpenDevice, ";

    /* Start the mainloop and connect a context so we can
       retrieve the parameters of the default sink */
    mainloop = pa_threaded_mainloop_new();
    if (!mainloop)
    {
        VBERROR(fn_log_tag + "Failed to get new threaded mainloop");
        delete m_aosettings;
        m_aosettings = NULL;
        return NULL;
    }

    pa_threaded_mainloop_start(mainloop);
    pa_threaded_mainloop_lock(mainloop);

    if (!ContextConnect())
    {
        pa_threaded_mainloop_unlock(mainloop);
        pa_threaded_mainloop_stop(mainloop);
        pa_threaded_mainloop_free(mainloop);  // was leaked here
        mainloop = NULL;
        delete m_aosettings;
        m_aosettings = NULL;
        return NULL;
    }

    /* Get the samplerate and channel count of the default sink, supported
       rate and channels are added in SinkInfoCallback */
    /* We should in theory be able to feed pulse any samplerate but allowing
       it to resample results in weird behaviour (odd channel maps, static)
       post pause / reset */
    pa_operation *op = pa_context_get_sink_info_by_index(pcontext, 0,
                                                         SinkInfoCallback,
                                                         this);
    if (op)
    {
        pa_operation_unref(op);
        // SinkInfoCallback signals the mainloop when it has run.
        pa_threaded_mainloop_wait(mainloop);
    }
    else
        VBERROR("Failed to determine default sink samplerate");

    pa_threaded_mainloop_unlock(mainloop);

    // All formats except S24 (pulse wants S24LSB)
    while ((fmt = m_aosettings->GetNextFormat()))
    {
        if (fmt == FORMAT_S24
// define from PA 0.9.15 only
#ifndef PA_MAJOR
            || fmt == FORMAT_S24LSB
#endif
            )
            continue;
        m_aosettings->AddSupportedFormat(fmt);
    }

    pa_context_disconnect(pcontext);
    pa_context_unref(pcontext);
    pcontext = NULL;
    pa_threaded_mainloop_stop(mainloop);
    pa_threaded_mainloop_free(mainloop);  // was leaked here
    mainloop = NULL;

    return m_aosettings;
}
/* Open a PulseAudio playback stream for the given out-stream.
 *
 * Validates the channel count, builds the sample spec / channel map,
 * creates and connects a (corked) stream, waits for it to become ready,
 * then derives the effective software latency from the writable size.
 *
 * Returns 0 on success or a SoundIoError* code on failure. On stream
 * allocation failure the out-stream is destroyed here; on later failures
 * the caller is expected to clean up (NOTE(review): the connect-failure
 * path leaves ospa->stream allocated — presumably freed by the caller via
 * outstream_destroy_pa; confirm). */
static int outstream_open_pa(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os) {
    struct SoundIoOutStreamPulseAudio *ospa = &os->backend_data.pulseaudio;
    struct SoundIoOutStream *outstream = &os->pub;

    /* PulseAudio cannot represent more channels than PA_CHANNELS_MAX. */
    if ((unsigned)outstream->layout.channel_count > PA_CHANNELS_MAX)
        return SoundIoErrorIncompatibleBackend;

    if (!outstream->name)
        outstream->name = "SoundIoOutStream";

    struct SoundIoPulseAudio *sipa = &si->backend_data.pulseaudio;

    /* Reset per-stream state before (re)connecting. */
    SOUNDIO_ATOMIC_STORE(ospa->stream_ready, false);
    SOUNDIO_ATOMIC_FLAG_TEST_AND_SET(ospa->clear_buffer_flag);

    assert(sipa->pulse_context);

    pa_threaded_mainloop_lock(sipa->main_loop);

    pa_sample_spec sample_spec;
    sample_spec.format = to_pulseaudio_format(outstream->format);
    sample_spec.rate = outstream->sample_rate;
    sample_spec.channels = outstream->layout.channel_count;

    pa_channel_map channel_map = to_pulseaudio_channel_map(&outstream->layout);

    ospa->stream = pa_stream_new(sipa->pulse_context, outstream->name, &sample_spec, &channel_map);
    if (!ospa->stream) {
        pa_threaded_mainloop_unlock(sipa->main_loop);
        outstream_destroy_pa(si, os);
        return SoundIoErrorNoMem;
    }
    /* State callback flips ospa->stream_ready and signals the mainloop. */
    pa_stream_set_state_callback(ospa->stream, playback_stream_state_callback, os);

    /* UINT32_MAX = "let the server choose" for each buffer attribute. */
    ospa->buffer_attr.maxlength = UINT32_MAX;
    ospa->buffer_attr.tlength = UINT32_MAX;
    ospa->buffer_attr.prebuf = 0;
    ospa->buffer_attr.minreq = UINT32_MAX;
    ospa->buffer_attr.fragsize = UINT32_MAX;

    int bytes_per_second = outstream->bytes_per_frame * outstream->sample_rate;
    if (outstream->software_latency > 0.0) {
        /* Translate the requested latency (seconds) into a whole number of
         * frames' worth of bytes and request it as the target length. */
        int buffer_length = outstream->bytes_per_frame *
            ceil_dbl_to_int(outstream->software_latency * bytes_per_second / (double)outstream->bytes_per_frame);

        ospa->buffer_attr.maxlength = buffer_length;
        ospa->buffer_attr.tlength = buffer_length;
    }

    /* Start corked: playback begins only when the caller uncorks. */
    pa_stream_flags_t flags = (pa_stream_flags_t)(PA_STREAM_START_CORKED | PA_STREAM_AUTO_TIMING_UPDATE |
            PA_STREAM_INTERPOLATE_TIMING);

    int err = pa_stream_connect_playback(ospa->stream,
            outstream->device->id, &ospa->buffer_attr,
            flags, NULL, NULL);
    if (err) {
        pa_threaded_mainloop_unlock(sipa->main_loop);
        return SoundIoErrorOpeningDevice;
    }

    /* Block until playback_stream_state_callback reports readiness. */
    while (!SOUNDIO_ATOMIC_LOAD(ospa->stream_ready))
        pa_threaded_mainloop_wait(sipa->main_loop);

    pa_operation *update_timing_info_op = pa_stream_update_timing_info(ospa->stream, timing_update_callback, si);
    if ((err = perform_operation(si, update_timing_info_op))) {
        pa_threaded_mainloop_unlock(sipa->main_loop);
        return err;
    }

    /* Report the latency the server actually granted, derived from how much
     * can be written right now. */
    size_t writable_size = pa_stream_writable_size(ospa->stream);
    outstream->software_latency = ((double)writable_size) / (double)bytes_per_second;

    pa_threaded_mainloop_unlock(sipa->main_loop);

    return 0;
}
static void audio_callback(void* data) { sa_stream_t* s = (sa_stream_t*)data; unsigned int bytes_per_frame = s->sample_spec.channels * pa_sample_size(&s->sample_spec); size_t buffer_size = s->sample_spec.rate * bytes_per_frame; char* buffer = malloc(buffer_size); while(1) { char* dst = buffer; size_t bytes_to_copy, bytes; pa_threaded_mainloop_lock(s->m); while(1) { if (s == NULL || s->stream == NULL) { if (s != NULL && s->m != NULL) pa_threaded_mainloop_unlock(s->m); goto free_buffer; } if ((bytes_to_copy = pa_stream_writable_size(s->stream)) == (size_t) -1) { fprintf(stderr, "pa_stream_writable_size() failed: %s", pa_strerror(pa_context_errno(s->context))); pa_threaded_mainloop_unlock(s->m); goto free_buffer; } if(bytes_to_copy > 0) break; pa_threaded_mainloop_wait(s->m); } pa_threaded_mainloop_unlock(s->m); if (bytes_to_copy > buffer_size) bytes_to_copy = buffer_size; bytes = bytes_to_copy; pthread_mutex_lock(&s->mutex); if (!s->thread_id) { pthread_mutex_unlock(&s->mutex); break; } /* * Consume data from the start of the buffer list. */ while (1) { unsigned int avail = s->bl_head->end - s->bl_head->start; assert(s->bl_head->start <= s->bl_head->end); if (avail >= bytes_to_copy) { /* * We have all we need in the head buffer, so just grab it and go. */ memcpy(dst, s->bl_head->data + s->bl_head->start, bytes_to_copy); s->bl_head->start += bytes_to_copy; break; } else { sa_buf* next = 0; /* * Copy what we can from the head and move on to the next buffer. */ memcpy(dst, s->bl_head->data + s->bl_head->start, avail); s->bl_head->start += avail; dst += avail; bytes_to_copy -= avail; /* * We want to free the now-empty buffer, but not if it's also the * current tail. If it is the tail, we don't have enough data to fill * the destination buffer, so we write less and give up. 
*/ next = s->bl_head->next; if (next == NULL) { bytes = bytes-bytes_to_copy; break; } free(s->bl_head); s->bl_head = next; s->n_bufs--; } /* if (avail >= bytes_to_copy), else */ } /* while (1) */ if(bytes > 0) { pa_threaded_mainloop_lock(s->m); if (pa_stream_write(s->stream, buffer, bytes, NULL, 0, PA_SEEK_RELATIVE) < 0) { fprintf(stderr, "pa_stream_write() failed: %s", pa_strerror(pa_context_errno(s->context))); pa_threaded_mainloop_unlock(s->m); return; } pa_stream_update_timing_info(s->stream, NULL, NULL); s->bytes_written += bytes; pa_threaded_mainloop_unlock(s->m); } pthread_mutex_unlock(&s->mutex); } free_buffer: free(buffer); }
/* Create the PulseAudio playback stream for this device and block until it
 * is either ready or has entered a terminal state.
 *
 * Returns TRUE when the stream reached PA_STREAM_READY, FALSE otherwise
 * (the stream is closed again on failure). */
static BOOL tsmf_pulse_open_stream(TSMFPulseAudioDevice *pulse)
{
	pa_buffer_attr buffer_attr = { 0 };
	pa_stream_state_t state;

	if(!pulse->context)
		return FALSE;

	DEBUG_TSMF("");
	pa_threaded_mainloop_lock(pulse->mainloop);

	pulse->stream = pa_stream_new(pulse->context, "freerdp",
								  &pulse->sample_spec, NULL);
	if(!pulse->stream)
	{
		WLog_ERR(TAG, "pa_stream_new failed (%d)",
				 pa_context_errno(pulse->context));
		pa_threaded_mainloop_unlock(pulse->mainloop);
		return FALSE;
	}

	/* Both callbacks signal the mainloop so the wait below can make progress. */
	pa_stream_set_state_callback(pulse->stream,
								 tsmf_pulse_stream_state_callback, pulse);
	pa_stream_set_write_callback(pulse->stream,
								 tsmf_pulse_stream_request_callback, pulse);

	/* 500ms of server-side buffer, target 250ms; -1 leaves the rest to PA. */
	buffer_attr.maxlength = pa_usec_to_bytes(500000, &pulse->sample_spec);
	buffer_attr.tlength = pa_usec_to_bytes(250000, &pulse->sample_spec);
	buffer_attr.prebuf = (UINT32) -1;
	buffer_attr.minreq = (UINT32) -1;
	buffer_attr.fragsize = (UINT32) -1;

	if(pa_stream_connect_playback(pulse->stream,
								  pulse->device[0] ? pulse->device : NULL,
								  &buffer_attr,
								  PA_STREAM_ADJUST_LATENCY |
								  PA_STREAM_INTERPOLATE_TIMING |
								  PA_STREAM_AUTO_TIMING_UPDATE,
								  NULL, NULL) < 0)
	{
		WLog_ERR(TAG, "pa_stream_connect_playback failed (%d)",
				 pa_context_errno(pulse->context));
		pa_threaded_mainloop_unlock(pulse->mainloop);
		return FALSE;
	}

	/* Block until the stream leaves its transient states. */
	while(TRUE)
	{
		state = pa_stream_get_state(pulse->stream);

		if(state == PA_STREAM_READY)
			break;

		if(!PA_STREAM_IS_GOOD(state))
		{
			WLog_ERR(TAG, "bad stream state (%d)",
					 pa_context_errno(pulse->context));
			break;
		}

		pa_threaded_mainloop_wait(pulse->mainloop);
	}

	pa_threaded_mainloop_unlock(pulse->mainloop);

	if(state != PA_STREAM_READY)
	{
		tsmf_pulse_close_stream(pulse);
		return FALSE;
	}

	DEBUG_TSMF("connected");
	return TRUE;
}
/**
 * Open a PulseAudio playback stream matching (as closely as possible) the
 * requested AE format, negotiate buffer sizes with the server, and register
 * the subscription callbacks used for sink/volume change notifications.
 *
 * On success m_IsAllocated is set, the stream is left corked (resumed when
 * the first packet arrives) and `format` is updated with the negotiated
 * layout, frame count and frame size. Returns false (after Deinitialize())
 * on any failure.
 */
bool CAESinkPULSE::Initialize(AEAudioFormat &format, std::string &device)
{
  {
    CSingleLock lock(m_sec);
    m_IsAllocated = false;
  }
  // Reset all per-stream state before (re)connecting.
  m_passthrough = false;
  m_BytesPerSecond = 0;
  m_BufferSize = 0;
  m_filled_bytes = 0;
  m_lastPackageStamp = 0;
  m_Channels = 0;
  m_Stream = NULL;
  m_Context = NULL;
  m_periodSize = 0;

  if (!SetupContext(NULL, &m_Context, &m_MainLoop))
  {
    CLog::Log(LOGNOTICE, "PulseAudio might not be running. Context was not created.");
    Deinitialize();
    return false;
  }

  pa_threaded_mainloop_lock(m_MainLoop);

  struct pa_channel_map map;
  pa_channel_map_init(&map);

  // PULSE cannot cope with e.g. planar formats so we fallback to FLOAT
  // when we receive an invalid pulse format
  if (AEFormatToPulseFormat(format.m_dataFormat) == PA_SAMPLE_INVALID)
  {
    CLog::Log(LOGDEBUG, "PULSE does not support format: %s - will fallback to AE_FMT_FLOAT", CAEUtil::DataFormatToStr(format.m_dataFormat));
    format.m_dataFormat = AE_FMT_FLOAT;
  }

  m_passthrough = AE_IS_RAW(format.m_dataFormat);

  if(m_passthrough)
  {
    // Raw/IEC61937 passthrough always goes out as a stereo frame.
    map.channels = 2;
    format.m_channelLayout = AE_CH_LAYOUT_2_0;
  }
  else
  {
    map = AEChannelMapToPAChannel(format.m_channelLayout);
    // if count has changed we need to fit the AE Map
    if(map.channels != format.m_channelLayout.Count())
      format.m_channelLayout = PAChannelToAEChannelMap(map);
  }
  m_Channels = format.m_channelLayout.Count();

  // store information about current sink
  SinkInfoStruct sinkStruct;
  sinkStruct.mainloop = m_MainLoop;
  sinkStruct.device_found = false;

  // get real sample rate of the device we want to open - to avoid resampling
  bool isDefaultDevice = (device == "Default");
  WaitForOperation(pa_context_get_sink_info_by_name(m_Context, isDefaultDevice ? NULL : device.c_str(), SinkInfoCallback, &sinkStruct), m_MainLoop, "Get Sink Info");
  // only check if the device is existing - don't alter the sample rate
  if (!sinkStruct.device_found)
  {
    CLog::Log(LOGERROR, "PulseAudio: Sink %s not found", device.c_str());
    pa_threaded_mainloop_unlock(m_MainLoop);
    Deinitialize();
    return false;
  }

  // Pulse can resample everything between 1 hz and 192000 hz
  // Make sure we are in the range that we originally added
  format.m_sampleRate = std::max(5512U, std::min(format.m_sampleRate, 192000U));

  // Describe the stream format via a pa_format_info (extended API) so that
  // encoded passthrough formats can be expressed too.
  pa_format_info *info[1];
  info[0] = pa_format_info_new();
  info[0]->encoding = AEFormatToPulseEncoding(format.m_dataFormat);
  if(!m_passthrough)
  {
    pa_format_info_set_sample_format(info[0], AEFormatToPulseFormat(format.m_dataFormat));
    pa_format_info_set_channel_map(info[0], &map);
  }
  pa_format_info_set_channels(info[0], m_Channels);

  // PA requires m_encodedRate in order to do EAC3
  unsigned int samplerate = format.m_sampleRate;
  if (m_passthrough && (AEFormatToPulseEncoding(format.m_dataFormat) == PA_ENCODING_EAC3_IEC61937))
  {
    // this is only used internally for PA to use EAC3
    samplerate = format.m_encodedRate;
  }
  pa_format_info_set_rate(info[0], samplerate);

  if (!pa_format_info_valid(info[0]))
  {
    CLog::Log(LOGERROR, "PulseAudio: Invalid format info");
    pa_format_info_free(info[0]);
    pa_threaded_mainloop_unlock(m_MainLoop);
    Deinitialize();
    return false;
  }

  // Derive a classic sample spec (needed for byte/frame size math below).
  pa_sample_spec spec;
#if PA_CHECK_VERSION(2,0,0)
  pa_format_info_to_sample_spec(info[0], &spec, NULL);
#else
  spec.rate = (AEFormatToPulseEncoding(format.m_dataFormat) == PA_ENCODING_EAC3_IEC61937) ? 4 * samplerate : samplerate;
  spec.format = AEFormatToPulseFormat(format.m_dataFormat);
  spec.channels = m_Channels;
#endif
  if (!pa_sample_spec_valid(&spec))
  {
    CLog::Log(LOGERROR, "PulseAudio: Invalid sample spec");
    pa_format_info_free(info[0]);
    pa_threaded_mainloop_unlock(m_MainLoop);
    Deinitialize();
    return false;
  }

  m_BytesPerSecond = pa_bytes_per_second(&spec);
  unsigned int frameSize = pa_frame_size(&spec);

  m_Stream = pa_stream_new_extended(m_Context, "kodi audio stream", info, 1, NULL);
  // The stream keeps its own copy of the format info; release ours.
  pa_format_info_free(info[0]);

  if (m_Stream == NULL)
  {
    CLog::Log(LOGERROR, "PulseAudio: Could not create a stream");
    pa_threaded_mainloop_unlock(m_MainLoop);
    Deinitialize();
    return false;
  }

  // All three callbacks just signal the mainloop so waits below can proceed.
  pa_stream_set_state_callback(m_Stream, StreamStateCallback, m_MainLoop);
  pa_stream_set_write_callback(m_Stream, StreamRequestCallback, m_MainLoop);
  pa_stream_set_latency_update_callback(m_Stream, StreamLatencyUpdateCallback, m_MainLoop);

  // default buffer construction
  // align with AE's max buffer
  unsigned int latency = m_BytesPerSecond / 2.5; // 400 ms
  unsigned int process_time = latency / 4; // 100 ms
  if(sinkStruct.isHWDevice)
  {
    // on hw devices buffers can be further reduced
    // 200ms max latency
    // 50ms min packet size
    latency = m_BytesPerSecond / 5;
    process_time = latency / 4;
  }

  pa_buffer_attr buffer_attr;
  buffer_attr.fragsize = latency;
  buffer_attr.maxlength = (uint32_t) -1;
  buffer_attr.minreq = process_time;
  buffer_attr.prebuf = (uint32_t) -1;
  buffer_attr.tlength = latency;

  if (pa_stream_connect_playback(m_Stream, isDefaultDevice ? NULL : device.c_str(), &buffer_attr, ((pa_stream_flags)(PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_ADJUST_LATENCY)), NULL, NULL) < 0)
  {
    CLog::Log(LOGERROR, "PulseAudio: Failed to connect stream to output");
    pa_threaded_mainloop_unlock(m_MainLoop);
    Deinitialize();
    return false;
  }

  /* Wait until the stream is ready */
  do
  {
    pa_threaded_mainloop_wait(m_MainLoop);
    CLog::Log(LOGDEBUG, "PulseAudio: Stream %s", StreamStateToString(pa_stream_get_state(m_Stream)));
  }
  while (pa_stream_get_state(m_Stream) != PA_STREAM_READY && pa_stream_get_state(m_Stream) != PA_STREAM_FAILED);

  if (pa_stream_get_state(m_Stream) == PA_STREAM_FAILED)
  {
    CLog::Log(LOGERROR, "PulseAudio: Waited for the stream but it failed");
    pa_threaded_mainloop_unlock(m_MainLoop);
    Deinitialize();
    return false;
  }

  // Read back the buffer attributes the server actually granted.
  const pa_buffer_attr *a;
  if (!(a = pa_stream_get_buffer_attr(m_Stream)))
  {
    CLog::Log(LOGERROR, "PulseAudio: %s", pa_strerror(pa_context_errno(m_Context)));
    pa_threaded_mainloop_unlock(m_MainLoop);
    Deinitialize();
    return false;
  }
  else
  {
    unsigned int packetSize = a->minreq;
    m_BufferSize = a->tlength;
    m_periodSize = a->minreq;
    format.m_frames = packetSize / frameSize;
  }

  {
    CSingleLock lock(m_sec);
    // Register Callback for Sink changes
    pa_context_set_subscribe_callback(m_Context, SinkChangedCallback, this);
    const pa_subscription_mask_t mask = PA_SUBSCRIPTION_MASK_SINK;
    pa_operation *op = pa_context_subscribe(m_Context, mask, NULL, this);
    if (op != NULL)
      pa_operation_unref(op);

    // Register Callback for Sink Info changes - this handles volume
    // NOTE(review): pa_context_set_subscribe_callback() holds a single
    // callback per context, so this second call replaces SinkChangedCallback
    // registered just above — confirm this is intentional.
    pa_context_set_subscribe_callback(m_Context, SinkInputInfoChangedCallback, this);
    const pa_subscription_mask_t mask_input = PA_SUBSCRIPTION_MASK_SINK_INPUT;
    pa_operation* op_sinfo = pa_context_subscribe(m_Context, mask_input, NULL, this);
    if (op_sinfo != NULL)
      pa_operation_unref(op_sinfo);
  }

  pa_threaded_mainloop_unlock(m_MainLoop);

  format.m_frameSize = frameSize;
  format.m_frameSamples = format.m_frames * format.m_channelLayout.Count();
  m_format = format;
  // Passthrough data is delivered to AE as packed S16 native-endian.
  format.m_dataFormat = m_passthrough ? AE_FMT_S16NE : format.m_dataFormat;

  CLog::Log(LOGNOTICE, "PulseAudio: Opened device %s in %s mode with Buffersize %u ms", device.c_str(), m_passthrough ? "passthrough" : "pcm", (unsigned int) ((m_BufferSize / (float) m_BytesPerSecond) * 1000));

  // Cork stream will resume when adding first package
  Pause(true);
  {
    CSingleLock lock(m_sec);
    m_IsAllocated = true;
  }

  return true;
}
/* Open a PulseAudio context for this source element and block until the
 * context is ready. Returns TRUE on success; on failure the context is
 * destroyed again and FALSE is returned. */
static gboolean
gst_pulsesrc_open (GstAudioSrc * asrc)
{
  GstPulseSrc *pulsesrc = GST_PULSESRC_CAST (asrc);
  gchar *client_name = gst_pulse_client_name ();

  pa_threaded_mainloop_lock (pulsesrc->mainloop);

  g_assert (!pulsesrc->context);
  g_assert (!pulsesrc->stream);

  GST_DEBUG_OBJECT (pulsesrc, "opening device");

  pulsesrc->context =
      pa_context_new (pa_threaded_mainloop_get_api (pulsesrc->mainloop),
      client_name);
  if (pulsesrc->context == NULL) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("Failed to create context"), (NULL));
    goto unlock_and_fail;
  }

  /* The state callback signals the mainloop on every context state change. */
  pa_context_set_state_callback (pulsesrc->context,
      gst_pulsesrc_context_state_cb, pulsesrc);

  GST_DEBUG_OBJECT (pulsesrc, "connect to server %s",
      GST_STR_NULL (pulsesrc->server));

  if (pa_context_connect (pulsesrc->context, pulsesrc->server, 0, NULL) < 0) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("Failed to connect: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }

  /* Wait until the context is ready */
  while (TRUE) {
    pa_context_state_t state = pa_context_get_state (pulsesrc->context);

    if (state == PA_CONTEXT_READY)
      break;

    if (!PA_CONTEXT_IS_GOOD (state)) {
      GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("Failed to connect: %s",
              pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
      goto unlock_and_fail;
    }

    pa_threaded_mainloop_wait (pulsesrc->mainloop);
  }

  GST_DEBUG_OBJECT (pulsesrc, "connected");

  pa_threaded_mainloop_unlock (pulsesrc->mainloop);
  g_free (client_name);

  return TRUE;

  /* ERRORS */
unlock_and_fail:
  gst_pulsesrc_destroy_context (pulsesrc);
  pa_threaded_mainloop_unlock (pulsesrc->mainloop);
  g_free (client_name);
  return FALSE;
}
/*
 * (Re)establish the PulseAudio connection: tear down any existing context,
 * create a fresh threaded mainloop + context, connect (without autospawning
 * a daemon), wait until the context is ready, and finally re-cache every
 * known sound sample on the server.
 */
void Sounds::reload() {
  if (m_ctx) {
    destroy();
  }

  // This can not happen but just in case
  if (m_loop) {
    destroy();
  }

  m_loop = pa_threaded_mainloop_new();

  // Identify ourselves to the server via a proplist; on Sailfish we
  // masquerade as ngfd so the platform audio policy applies.
  pa_proplist *prop = pa_proplist_new();
#ifdef SAILFISH
  pa_proplist_sets(prop, PA_PROP_MEDIA_ROLE, "event");
#else
  pa_proplist_sets(prop, PA_PROP_MEDIA_ROLE, MEDIA_ROLE);
#endif
  pa_proplist_sets(prop, PA_PROP_APPLICATION_NAME,
                   qPrintable(QCoreApplication::instance()->applicationName()));
#ifdef SAILFISH
  pa_proplist_sets(prop, PA_PROP_APPLICATION_PROCESS_BINARY, "ngfd");
#endif

  m_ctx = pa_context_new_with_proplist(pa_threaded_mainloop_get_api(m_loop), NULL, prop);
  // The context copies the properties; release our reference.
  pa_proplist_free(prop);

  if (!m_ctx) {
    qmlInfo(this) << "Failed to create pulse audio context";
    return;
  }

  // contextStateCallback signals the mainloop on each state change so the
  // wait loop below can re-check the state.
  pa_context_set_state_callback(m_ctx, (pa_context_notify_cb_t)contextStateCallback, m_loop);

  if (pa_context_connect(m_ctx, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL) < 0) {
    pa_context_unref(m_ctx);
    m_ctx = 0;
    qmlInfo(this) << "Failed to connect to pulse audio";
    return;
  }

  pa_threaded_mainloop_lock(m_loop);

  if (pa_threaded_mainloop_start(m_loop) < 0) {
    pa_threaded_mainloop_unlock(m_loop);
    pa_context_disconnect(m_ctx);
    pa_context_unref(m_ctx);
    m_ctx = 0;
    qmlInfo(this) << "Failed to start pulse audio loop";
    return;
  }

  // Block until the context either becomes ready or fails.
  while (true) {
    bool out = false;

    switch (pa_context_get_state(m_ctx)) {
    case PA_CONTEXT_UNCONNECTED:
    case PA_CONTEXT_CONNECTING:
    case PA_CONTEXT_AUTHORIZING:
    case PA_CONTEXT_SETTING_NAME:
      // Transient state: wait for the next signal from the callback.
      pa_threaded_mainloop_wait(m_loop);
      continue;

    case PA_CONTEXT_READY:
      out = true;
      break;

    case PA_CONTEXT_FAILED:
    case PA_CONTEXT_TERMINATED:
      pa_threaded_mainloop_unlock(m_loop);
      pa_context_disconnect(m_ctx);
      pa_context_unref(m_ctx);
      m_ctx = 0;
      qmlInfo(this) << "Failed to connect to pulse audio server";
      return;
    }

    if (out) {
      break;
    }
  }

  // The state callback is no longer needed once the context is ready.
  pa_context_set_state_callback(m_ctx, NULL, NULL);

  pa_threaded_mainloop_unlock(m_loop);

  // Upload every known sample to the server so playback is instant later.
  for (QHash<QString, SoundFileInfo *>::const_iterator iter = m_files.begin();
       iter != m_files.end(); iter++) {
    cache(iter.key());
  }
}
/* Connect the (already created) record stream with the buffer attributes
 * derived from the ring-buffer spec, wait until it is ready, and adjust
 * spec->segsize/segtotal to what the server actually granted.
 *
 * BUGFIX: if the server reports actual->fragsize == 0, the old rounding
 * expression `wanted.fragsize / actual->fragsize` divided by zero; we now
 * keep the requested segsize in that case. */
static gboolean
gst_pulsesrc_prepare (GstAudioSrc * asrc, GstRingBufferSpec * spec)
{
  pa_buffer_attr wanted;
  const pa_buffer_attr *actual;
  GstPulseSrc *pulsesrc = GST_PULSESRC_CAST (asrc);

  pa_threaded_mainloop_lock (pulsesrc->mainloop);

  /* -1 means "server default"; we only pin the fragment size. */
  wanted.maxlength = -1;
  wanted.tlength = -1;
  wanted.prebuf = 0;
  wanted.minreq = -1;
  wanted.fragsize = spec->segsize;

  GST_INFO_OBJECT (pulsesrc, "maxlength: %d", wanted.maxlength);
  GST_INFO_OBJECT (pulsesrc, "tlength:   %d", wanted.tlength);
  GST_INFO_OBJECT (pulsesrc, "prebuf:    %d", wanted.prebuf);
  GST_INFO_OBJECT (pulsesrc, "minreq:    %d", wanted.minreq);
  GST_INFO_OBJECT (pulsesrc, "fragsize:  %d", wanted.fragsize);

  if (pa_stream_connect_record (pulsesrc->stream, pulsesrc->device, &wanted,
          PA_STREAM_INTERPOLATE_TIMING |
          PA_STREAM_AUTO_TIMING_UPDATE |
          PA_STREAM_NOT_MONOTONOUS |
#ifdef HAVE_PULSE_0_9_11
          PA_STREAM_ADJUST_LATENCY |
#endif
          PA_STREAM_START_CORKED) < 0) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("Failed to connect stream: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }

  pulsesrc->corked = TRUE;

  for (;;) {
    pa_stream_state_t state;

    state = pa_stream_get_state (pulsesrc->stream);

    if (!PA_STREAM_IS_GOOD (state)) {
      GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
          ("Failed to connect stream: %s",
              pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
      goto unlock_and_fail;
    }

    if (state == PA_STREAM_READY)
      break;

    /* Wait until the stream is ready */
    pa_threaded_mainloop_wait (pulsesrc->mainloop);
  }

  /* get the actual buffering properties now */
  actual = pa_stream_get_buffer_attr (pulsesrc->stream);

  GST_INFO_OBJECT (pulsesrc, "maxlength: %d", actual->maxlength);
  GST_INFO_OBJECT (pulsesrc, "tlength:   %d (wanted: %d)",
      actual->tlength, wanted.tlength);
  GST_INFO_OBJECT (pulsesrc, "prebuf:    %d", actual->prebuf);
  GST_INFO_OBJECT (pulsesrc, "minreq:    %d (wanted %d)", actual->minreq,
      wanted.minreq);
  GST_INFO_OBJECT (pulsesrc, "fragsize:  %d (wanted %d)",
      actual->fragsize, wanted.fragsize);

  if (actual->fragsize >= wanted.fragsize) {
    spec->segsize = actual->fragsize;
  } else if (actual->fragsize > 0) {
    /* Round up to the largest multiple of the granted fragment size that
     * still fits in the requested segment size. */
    spec->segsize = actual->fragsize * (wanted.fragsize / actual->fragsize);
  }
  /* else: fragsize of 0 — keep the segsize the caller requested. */
  spec->segtotal = actual->maxlength / spec->segsize;

  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

  return TRUE;

unlock_and_fail:
  {
    gst_pulsesrc_destroy_stream (pulsesrc);
    pa_threaded_mainloop_unlock (pulsesrc->mainloop);
    return FALSE;
  }
}
/*
 * Upload the sound file registered under `id` to the PulseAudio server's
 * sample cache so it can later be triggered by name with
 * pa_context_play_sample(). Also records the file's duration (derived from
 * its size and sample spec) on the SoundFileInfo. Silently returns if the
 * id is unknown, has no path, or no context is connected.
 */
void Sounds::cache(const QString& id) {
  SoundFileInfo *info = m_files[id];

  if (!info) {
    return;
  }

  if (info->path().isEmpty()) {
    return;
  }

  if (!m_ctx) {
    return;
  }

  // FileReader stays on the stack; the write callback streams from it while
  // we block on the mainloop below, so it outlives the upload.
  FileReader h(info->path());

  pa_sample_spec *spec = h.sampleSpec();
  if (!spec) {
    qmlInfo(this) << "Failed to get a sample spec";
    return;
  }

  if (!pa_sample_spec_valid(spec)) {
    qmlInfo(this) << "Failed to get a valid sample spec";
    return;
  }

  // First we set the file duration
  info->setDuration(pa_bytes_to_usec(h.size(), spec));

  pa_proplist *prop = pa_proplist_new();
  pa_proplist_sets(prop, PA_PROP_MEDIA_ROLE, "event");
#ifdef SAILFISH
  pa_proplist_sets(prop, PA_PROP_MEDIA_NAME, "camera-event");
#else
  pa_proplist_sets(prop, PA_PROP_MEDIA_NAME, qPrintable(id));
#endif
  pa_proplist_sets(prop, PA_PROP_EVENT_ID, qPrintable(id));
  pa_proplist_sets(prop, PA_PROP_MEDIA_FILENAME, qPrintable(info->path()));

#ifdef SAILFISH
  pa_proplist_sets(prop, PA_PROP_APPLICATION_PROCESS_BINARY, "ngfd");
  pa_stream *stream = pa_stream_new_with_proplist(m_ctx, "camera-event", spec, NULL, prop);
#else
  pa_stream *stream = pa_stream_new_with_proplist(m_ctx, qPrintable(id), spec, NULL, prop);
#endif

  // The stream copies the properties; release our reference.
  pa_proplist_free(prop);

  if (!stream) {
    qmlInfo(this) << "Failed to create a pulse audio stream";
    return;
  }

  // State changes signal the mainloop; write requests are served from `h`.
  pa_stream_set_state_callback(stream, (pa_stream_notify_cb_t)streamStateCallback, m_loop);
  pa_stream_set_write_callback(stream, (pa_stream_request_cb_t)streamRequestCallback, &h);

  pa_threaded_mainloop_lock(m_loop);

  if (pa_stream_connect_upload(stream, h.size()) < 0) {
    pa_stream_unref(stream);
    pa_threaded_mainloop_unlock(m_loop);
    qmlInfo(this) << "Failed to connect pulse audio stream";
    return;
  }

  // Block until the upload finishes (TERMINATED) or fails.
  while (true) {
    bool out = false;

    switch (pa_stream_get_state(stream)) {
    case PA_STREAM_FAILED:
      qmlInfo(this) << "Failed to connect our stream to pulse audio "
                    << pa_strerror(pa_context_errno(m_ctx));
      pa_stream_disconnect(stream);
      pa_stream_unref(stream);
      pa_threaded_mainloop_unlock(m_loop);
      return;

    case PA_STREAM_TERMINATED:
      // Upload complete: the server finished draining the sample.
      pa_threaded_mainloop_unlock(m_loop);
      out = true;
      break;

    case PA_STREAM_READY:
    case PA_STREAM_UNCONNECTED:
    case PA_STREAM_CREATING:
      // Still in progress: wait for the next state-change signal.
      pa_threaded_mainloop_wait(m_loop);
      continue;
    }

    if (out) {
      break;
    }
  }

  pa_stream_unref(stream);
}
/*
 * Open the PulseAudio record stream for audio input and wait until it is
 * ready; on success the ADPCM state and capture buffer are (re)initialized.
 *
 * BUGFIXES:
 *  - the connect-failure warning wrongly named pa_stream_connect_playback;
 *    this is a record stream;
 *  - on connect failure the stream was leaked and pulse->stream stayed set,
 *    which made every subsequent open attempt bail out early.
 */
static void audin_pulse_open(IAudinDevice* device, AudinReceive receive, void* user_data)
{
	pa_stream_state_t state;
	pa_buffer_attr buffer_attr = { 0 };
	AudinPulseDevice* pulse = (AudinPulseDevice*) device;

	if (!pulse->context)
		return;
	/* Need a negotiated format first, and never open twice. */
	if (!pulse->sample_spec.rate || pulse->stream)
		return;

	DEBUG_DVC("");

	pulse->receive = receive;
	pulse->user_data = user_data;

	pa_threaded_mainloop_lock(pulse->mainloop);
	pulse->stream = pa_stream_new(pulse->context, "freerdp_audin",
		&pulse->sample_spec, NULL);
	if (!pulse->stream)
	{
		pa_threaded_mainloop_unlock(pulse->mainloop);
		DEBUG_DVC("pa_stream_new failed (%d)",
			pa_context_errno(pulse->context));
		return;
	}

	pulse->bytes_per_frame = pa_frame_size(&pulse->sample_spec);
	/* Both callbacks signal the mainloop so the wait below can progress. */
	pa_stream_set_state_callback(pulse->stream,
		audin_pulse_stream_state_callback, pulse);
	pa_stream_set_read_callback(pulse->stream,
		audin_pulse_stream_request_callback, pulse);

	buffer_attr.maxlength = (uint32_t) -1;
	buffer_attr.tlength = (uint32_t) -1;
	buffer_attr.prebuf = (uint32_t) -1;
	buffer_attr.minreq = (uint32_t) -1;
	/* 500ms latency */
	buffer_attr.fragsize = pa_usec_to_bytes(500000, &pulse->sample_spec);

	if (pa_stream_connect_record(pulse->stream,
		pulse->device_name[0] ? pulse->device_name : NULL,
		&buffer_attr, PA_STREAM_ADJUST_LATENCY) < 0)
	{
		/* Drop the failed stream so a later open attempt can retry. */
		pa_stream_unref(pulse->stream);
		pulse->stream = NULL;
		pa_threaded_mainloop_unlock(pulse->mainloop);
		DEBUG_WARN("pa_stream_connect_record failed (%d)",
			pa_context_errno(pulse->context));
		return;
	}

	for (;;)
	{
		state = pa_stream_get_state(pulse->stream);
		if (state == PA_STREAM_READY)
			break;
		if (!PA_STREAM_IS_GOOD(state))
		{
			DEBUG_WARN("bad stream state (%d)",
				pa_context_errno(pulse->context));
			break;
		}
		pa_threaded_mainloop_wait(pulse->mainloop);
	}
	pa_threaded_mainloop_unlock(pulse->mainloop);

	if (state == PA_STREAM_READY)
	{
		memset(&pulse->adpcm, 0, sizeof(ADPCM));
		pulse->buffer = xzalloc(pulse->bytes_per_frame * pulse->frames_per_packet);
		pulse->buffer_frames = 0;
		DEBUG_DVC("connected");
	}
	else
	{
		/* Stream entered a terminal state; tear everything down. */
		audin_pulse_close(device);
	}
}
void Sounds::playAndBlock(const char *id) { if (isMuted()) { qmlInfo(this) << "not playing sounds while muted"; return; } if (!m_ctx) { qmlInfo(this) << "not connected to pulse audio"; return; } SoundFileInfo *info = m_files[id]; if (!info) { qmlInfo(this) << "unknown sound id " << id; return; } if (!info->duration()) { qmlInfo(this) << "unknown file duration"; return; } pa_threaded_mainloop_lock(m_loop); pa_operation *o = pa_context_play_sample(m_ctx, id, NULL, playbackVolume(), (pa_context_success_cb_t)contextSuccessCallback, m_loop); if (!o) { qmlInfo(this) << "failed to play sample " << id; return; } bool sleep = false; while (true) { bool out = false; switch (pa_operation_get_state(o)) { case PA_OPERATION_RUNNING: out = false; break; case PA_OPERATION_DONE: sleep = true; out = true; break; case PA_OPERATION_CANCELLED: sleep = false; out = true; break; } if (out) { break; } pa_threaded_mainloop_wait(m_loop); } pa_threaded_mainloop_unlock(m_loop); pa_operation_unref(o); // Sleep for the duration of the file: if (sleep) { usleep(info->duration()); } }
/**
 * Creates a PulseAudio stream, connects it for capture or playback, and waits
 * on the threaded mainloop until it reaches PA_STREAM_READY.
 *
 * NOTE(review): relies on the file-global g_pMainLoop / g_pContext set up by
 * drvHostPulseAudioInit, and on g_fAbortMainLoop being raised by the state
 * callbacks to skip a pa_threaded_mainloop_wait() whose signal already fired.
 *
 * @param fIn         true to open a capture (record) stream, false for playback.
 * @param pszName     Stream name handed to PulseAudio (also used in logging).
 * @param pSampleSpec Sample specification; must pass pa_sample_spec_valid().
 * @param pBufAttr    In: requested buffer attributes.  Out: the attributes the
 *                    server actually granted once the stream is ready.
 * @param ppStream    Where to store the connected stream on success; untouched
 *                    on failure.
 * @return VBox status code.
 */
static int drvHostPulseAudioOpen(bool fIn, const char *pszName,
                                 pa_sample_spec *pSampleSpec, pa_buffer_attr *pBufAttr,
                                 pa_stream **ppStream)
{
    AssertPtrReturn(pszName, VERR_INVALID_POINTER);
    AssertPtrReturn(pSampleSpec, VERR_INVALID_POINTER);
    AssertPtrReturn(pBufAttr, VERR_INVALID_POINTER);
    AssertPtrReturn(ppStream, VERR_INVALID_POINTER);

    if (!pa_sample_spec_valid(pSampleSpec))
    {
        LogRel(("PulseAudio: Unsupported sample specification for stream \"%s\"\n",
                pszName));
        return VERR_NOT_SUPPORTED;
    }

    int rc = VINF_SUCCESS;
    pa_stream *pStream = NULL;
    uint32_t flags = PA_STREAM_NOFLAGS;

    LogFunc(("Opening \"%s\", rate=%dHz, channels=%d, format=%s\n",
             pszName, pSampleSpec->rate, pSampleSpec->channels,
             pa_sample_format_to_string(pSampleSpec->format)));

    /* Every stream operation below must run under the mainloop lock. */
    pa_threaded_mainloop_lock(g_pMainLoop);

    do
    {
        if (!(pStream = pa_stream_new(g_pContext, pszName, pSampleSpec,
                                      NULL /* pa_channel_map */)))
        {
            LogRel(("PulseAudio: Could not create stream \"%s\"\n", pszName));
            rc = VERR_NO_MEMORY;
            break;
        }

        /* The state callback signals the mainloop so the wait loop below wakes. */
        pa_stream_set_state_callback(pStream, drvHostPulseAudioCbStreamState, NULL);

#if PA_API_VERSION >= 12
        /* XXX */
        flags |= PA_STREAM_ADJUST_LATENCY;
#endif
#if 0
        /* Not applicable as we don't use pa_stream_get_latency() and
           pa_stream_get_time(). */
        flags |= PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE;
#endif
        /* No input/output right away after the stream was started. */
        flags |= PA_STREAM_START_CORKED;

        if (fIn)
        {
            LogFunc(("Input stream attributes: maxlength=%d fragsize=%d\n",
                     pBufAttr->maxlength, pBufAttr->fragsize));

            if (pa_stream_connect_record(pStream, /*dev=*/NULL, pBufAttr,
                                         (pa_stream_flags_t)flags) < 0)
            {
                LogRel(("PulseAudio: Could not connect input stream \"%s\": %s\n",
                        pszName, pa_strerror(pa_context_errno(g_pContext))));
                rc = VERR_AUDIO_BACKEND_INIT_FAILED;
                break;
            }
        }
        else
        {
            LogFunc(("Output buffer attributes: maxlength=%d tlength=%d prebuf=%d minreq=%d\n",
                     pBufAttr->maxlength, pBufAttr->tlength, pBufAttr->prebuf, pBufAttr->minreq));

            if (pa_stream_connect_playback(pStream, /*dev=*/NULL, pBufAttr,
                                           (pa_stream_flags_t)flags,
                                           /*cvolume=*/NULL, /*sync_stream=*/NULL) < 0)
            {
                LogRel(("PulseAudio: Could not connect playback stream \"%s\": %s\n",
                        pszName, pa_strerror(pa_context_errno(g_pContext))));
                rc = VERR_AUDIO_BACKEND_INIT_FAILED;
                break;
            }
        }

        /* Wait until the stream is ready. */
        for (;;)
        {
            /* When a state change already fired, skip the wait so the
               wakeup is not lost; then re-arm the abort flag. */
            if (!g_fAbortMainLoop)
                pa_threaded_mainloop_wait(g_pMainLoop);
            g_fAbortMainLoop = false;

            pa_stream_state_t sstate = pa_stream_get_state(pStream);
            if (sstate == PA_STREAM_READY)
                break;
            else if (   sstate == PA_STREAM_FAILED
                     || sstate == PA_STREAM_TERMINATED)
            {
                LogRel(("PulseAudio: Failed to initialize stream \"%s\" (state %ld)\n",
                        pszName, sstate));
                rc = VERR_AUDIO_BACKEND_INIT_FAILED;
                break;
            }
        }

        if (RT_FAILURE(rc))
            break;

        /* Copy back the buffer attributes the server actually granted. */
        const pa_buffer_attr *pBufAttrObtained = pa_stream_get_buffer_attr(pStream);
        AssertPtr(pBufAttrObtained);
        memcpy(pBufAttr, pBufAttrObtained, sizeof(pa_buffer_attr));

        if (fIn)
            LogFunc(("Obtained record buffer attributes: maxlength=%RU32, fragsize=%RU32\n",
                     pBufAttr->maxlength, pBufAttr->fragsize));
        else
            LogFunc(("Obtained playback buffer attributes: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d\n",
                     pBufAttr->maxlength, pBufAttr->tlength, pBufAttr->prebuf, pBufAttr->minreq));

    } while (0);

    /* Disconnect while still holding the lock; unref must happen unlocked
       only after the mainloop can no longer call back into the stream. */
    if (   RT_FAILURE(rc)
        && pStream)
        pa_stream_disconnect(pStream);

    pa_threaded_mainloop_unlock(g_pMainLoop);

    if (RT_FAILURE(rc))
    {
        if (pStream)
            pa_stream_unref(pStream);
    }
    else
        *ppStream = pStream;

    LogFlowFuncLeaveRC(rc);
    return rc;
}
CPulseAEStream::CPulseAEStream(pa_context *context, pa_threaded_mainloop *mainLoop, enum AEDataFormat format, unsigned int sampleRate, CAEChannelInfo channelLayout, unsigned int options) : m_fader(this) { ASSERT(channelLayout.Count()); m_Destroyed = false; m_Initialized = false; m_Paused = false; m_Stream = NULL; m_Context = context; m_MainLoop = mainLoop; m_format = format; m_sampleRate = sampleRate; m_channelLayout = channelLayout; m_options = options; m_DrainOperation = NULL; m_slave = NULL; pa_threaded_mainloop_lock(m_MainLoop); m_SampleSpec.channels = channelLayout.Count(); m_SampleSpec.rate = m_sampleRate; switch (m_format) { case AE_FMT_U8 : m_SampleSpec.format = PA_SAMPLE_U8; break; case AE_FMT_S16NE : m_SampleSpec.format = PA_SAMPLE_S16NE; break; case AE_FMT_S16LE : m_SampleSpec.format = PA_SAMPLE_S16LE; break; case AE_FMT_S16BE : m_SampleSpec.format = PA_SAMPLE_S16BE; break; case AE_FMT_S24NE3: m_SampleSpec.format = PA_SAMPLE_S24NE; break; case AE_FMT_S24NE4: m_SampleSpec.format = PA_SAMPLE_S24_32NE; break; case AE_FMT_S32NE : m_SampleSpec.format = PA_SAMPLE_S32NE; break; case AE_FMT_S32LE : m_SampleSpec.format = PA_SAMPLE_S32LE; break; case AE_FMT_S32BE : m_SampleSpec.format = PA_SAMPLE_S32BE; break; case AE_FMT_FLOAT : m_SampleSpec.format = PA_SAMPLE_FLOAT32NE; break; #if PA_CHECK_VERSION(1,0,0) case AE_FMT_DTS : case AE_FMT_EAC3 : case AE_FMT_AC3 : m_SampleSpec.format = PA_SAMPLE_S16NE; break; #endif default: CLog::Log(LOGERROR, "PulseAudio: Invalid format %i", format); pa_threaded_mainloop_unlock(m_MainLoop); m_format = AE_FMT_INVALID; return; } if (!pa_sample_spec_valid(&m_SampleSpec)) { CLog::Log(LOGERROR, "PulseAudio: Invalid sample spec"); pa_threaded_mainloop_unlock(m_MainLoop); Destroy(); return /*false*/; } m_frameSize = pa_frame_size(&m_SampleSpec); struct pa_channel_map map; map.channels = m_channelLayout.Count(); for (unsigned int ch = 0; ch < m_channelLayout.Count(); ++ch) switch(m_channelLayout[ch]) { case AE_CH_NULL: break; case AE_CH_MAX 
: break; case AE_CH_RAW : break; case AE_CH_FL : map.map[ch] = PA_CHANNEL_POSITION_FRONT_LEFT ; break; case AE_CH_FR : map.map[ch] = PA_CHANNEL_POSITION_FRONT_RIGHT ; break; case AE_CH_FC : map.map[ch] = PA_CHANNEL_POSITION_FRONT_CENTER ; break; case AE_CH_BC : map.map[ch] = PA_CHANNEL_POSITION_REAR_CENTER ; break; case AE_CH_BL : map.map[ch] = PA_CHANNEL_POSITION_REAR_LEFT ; break; case AE_CH_BR : map.map[ch] = PA_CHANNEL_POSITION_REAR_RIGHT ; break; case AE_CH_LFE : map.map[ch] = PA_CHANNEL_POSITION_LFE ; break; case AE_CH_FLOC: map.map[ch] = PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER ; break; case AE_CH_FROC: map.map[ch] = PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER; break; case AE_CH_SL : map.map[ch] = PA_CHANNEL_POSITION_SIDE_LEFT ; break; case AE_CH_SR : map.map[ch] = PA_CHANNEL_POSITION_SIDE_RIGHT ; break; case AE_CH_TC : map.map[ch] = PA_CHANNEL_POSITION_TOP_CENTER ; break; case AE_CH_TFL : map.map[ch] = PA_CHANNEL_POSITION_TOP_FRONT_LEFT ; break; case AE_CH_TFR : map.map[ch] = PA_CHANNEL_POSITION_TOP_FRONT_RIGHT ; break; case AE_CH_TFC : map.map[ch] = PA_CHANNEL_POSITION_TOP_CENTER ; break; case AE_CH_TBL : map.map[ch] = PA_CHANNEL_POSITION_TOP_REAR_LEFT ; break; case AE_CH_TBR : map.map[ch] = PA_CHANNEL_POSITION_TOP_REAR_RIGHT ; break; case AE_CH_TBC : map.map[ch] = PA_CHANNEL_POSITION_TOP_REAR_CENTER ; break; default: break; } m_MaxVolume = CAEFactory::GetEngine()->GetVolume(); m_Volume = 1.0f; pa_volume_t paVolume = pa_sw_volume_from_linear((double)(m_Volume * m_MaxVolume)); pa_cvolume_set(&m_ChVolume, m_SampleSpec.channels, paVolume); #if PA_CHECK_VERSION(1,0,0) pa_format_info *info[1]; info[0] = pa_format_info_new(); switch(m_format) { case AE_FMT_DTS : info[0]->encoding = PA_ENCODING_DTS_IEC61937 ; break; case AE_FMT_EAC3: info[0]->encoding = PA_ENCODING_EAC3_IEC61937; break; case AE_FMT_AC3 : info[0]->encoding = PA_ENCODING_AC3_IEC61937 ; break; default: info[0]->encoding = PA_ENCODING_PCM ; break; } pa_format_info_set_rate (info[0], m_SampleSpec.rate); 
pa_format_info_set_channels (info[0], m_SampleSpec.channels); pa_format_info_set_channel_map (info[0], &map); pa_format_info_set_sample_format(info[0], m_SampleSpec.format); m_Stream = pa_stream_new_extended(m_Context, "audio stream", info, 1, NULL); pa_format_info_free(info[0]); #else m_Stream = pa_stream_new(m_Context, "audio stream", &m_SampleSpec, &map); #endif if (m_Stream == NULL) { CLog::Log(LOGERROR, "PulseAudio: Could not create a stream"); pa_threaded_mainloop_unlock(m_MainLoop); Destroy(); return /*false*/; } pa_stream_set_state_callback(m_Stream, CPulseAEStream::StreamStateCallback, this); pa_stream_set_write_callback(m_Stream, CPulseAEStream::StreamRequestCallback, this); pa_stream_set_latency_update_callback(m_Stream, CPulseAEStream::StreamLatencyUpdateCallback, this); pa_stream_set_underflow_callback(m_Stream, CPulseAEStream::StreamUnderflowCallback, this); int flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE; if (options && AESTREAM_FORCE_RESAMPLE) flags |= PA_STREAM_VARIABLE_RATE; if (pa_stream_connect_playback(m_Stream, NULL, NULL, (pa_stream_flags)flags, &m_ChVolume, NULL) < 0) { CLog::Log(LOGERROR, "PulseAudio: Failed to connect stream to output"); pa_threaded_mainloop_unlock(m_MainLoop); Destroy(); return /*false*/; } /* Wait until the stream is ready */ do { pa_threaded_mainloop_wait(m_MainLoop); CLog::Log(LOGDEBUG, "PulseAudio: Stream %s", StreamStateToString(pa_stream_get_state(m_Stream))); } while (pa_stream_get_state(m_Stream) != PA_STREAM_READY && pa_stream_get_state(m_Stream) != PA_STREAM_FAILED); if (pa_stream_get_state(m_Stream) == PA_STREAM_FAILED) { CLog::Log(LOGERROR, "PulseAudio: Waited for the stream but it failed"); pa_threaded_mainloop_unlock(m_MainLoop); Destroy(); return /*false*/; } m_cacheSize = pa_stream_writable_size(m_Stream); pa_threaded_mainloop_unlock(m_MainLoop); m_Initialized = true; CLog::Log(LOGINFO, "PulseAEStream::Initialized"); CLog::Log(LOGINFO, " Sample Rate : %d", m_sampleRate); 
CLog::Log(LOGINFO, " Sample Format : %s", CAEUtil::DataFormatToStr(m_format)); CLog::Log(LOGINFO, " Channel Count : %d", m_channelLayout.Count()); CLog::Log(LOGINFO, " Channel Layout: %s", ((std::string)m_channelLayout).c_str()); CLog::Log(LOGINFO, " Frame Size : %d", m_frameSize); CLog::Log(LOGINFO, " Cache Size : %d", m_cacheSize); Resume(); return /*true*/; }
void SetupSound (void) { int error_number; // Acquire mainloop /////////////////////////////////////////////////////// device.mainloop = pa_threaded_mainloop_new (); if (device.mainloop == NULL) { fprintf (stderr, "Could not acquire PulseAudio main loop\n"); return; } // Acquire context //////////////////////////////////////////////////////// device.api = pa_threaded_mainloop_get_api (device.mainloop); device.context = pa_context_new (device.api, "PCSXR"); pa_context_set_state_callback (device.context, context_state_cb, &device); if (device.context == NULL) { fprintf (stderr, "Could not acquire PulseAudio device context\n"); return; } // Connect to PulseAudio server /////////////////////////////////////////// if (pa_context_connect (device.context, NULL, 0, NULL) < 0) { error_number = pa_context_errno (device.context); fprintf (stderr, "Could not connect to PulseAudio server: %s\n", pa_strerror(error_number)); return; } // Run mainloop until sever context is ready ////////////////////////////// pa_threaded_mainloop_lock (device.mainloop); if (pa_threaded_mainloop_start (device.mainloop) < 0) { fprintf (stderr, "Could not start mainloop\n"); return; } pa_context_state_t context_state; context_state = pa_context_get_state (device.context); while (context_state != PA_CONTEXT_READY) { context_state = pa_context_get_state (device.context); if (! 
PA_CONTEXT_IS_GOOD (context_state)) { error_number = pa_context_errno (device.context); fprintf (stderr, "Context state is not good: %s\n", pa_strerror (error_number)); return; } else if (context_state == PA_CONTEXT_READY) break; else fprintf (stderr, "PulseAudio context state is %d\n", context_state); pa_threaded_mainloop_wait (device.mainloop); } // Set sample spec //////////////////////////////////////////////////////// device.spec.format = PA_SAMPLE_S16NE; if (iDisStereo) device.spec.channels = 1; else device.spec.channels = 2; device.spec.rate = settings.frequency; pa_buffer_attr buffer_attributes; buffer_attributes.tlength = pa_bytes_per_second (& device.spec) / 5; buffer_attributes.maxlength = buffer_attributes.tlength * 3; buffer_attributes.minreq = buffer_attributes.tlength / 3; buffer_attributes.prebuf = buffer_attributes.tlength; //maxlength = buffer_attributes.maxlength; //fprintf (stderr, "Total space: %u\n", buffer_attributes.maxlength); //fprintf (stderr, "Minimum request size: %u\n", buffer_attributes.minreq); //fprintf (stderr, "Bytes needed before playback: %u\n", buffer_attributes.prebuf); //fprintf (stderr, "Target buffer size: %lu\n", buffer_attributes.tlength); // Acquire new stream using spec ////////////////////////////////////////// device.stream = pa_stream_new (device.context, "PCSXR", &device.spec, NULL); if (device.stream == NULL) { error_number = pa_context_errno (device.context); fprintf (stderr, "Could not acquire new PulseAudio stream: %s\n", pa_strerror (error_number)); return; } // Set callbacks for server events //////////////////////////////////////// pa_stream_set_state_callback (device.stream, stream_state_cb, &device); pa_stream_set_write_callback (device.stream, stream_request_cb, &device); pa_stream_set_latency_update_callback (device.stream, stream_latency_update_cb, &device); // Ready stream for playback ////////////////////////////////////////////// pa_stream_flags_t flags = (pa_stream_flags_t) (PA_STREAM_ADJUST_LATENCY 
| PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE); //pa_stream_flags_t flags = (pa_stream_flags_t) (PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_EARLY_REQUESTS); if (pa_stream_connect_playback (device.stream, NULL, &buffer_attributes, flags, NULL, NULL) < 0) { pa_context_errno (device.context); fprintf (stderr, "Could not connect for playback: %s\n", pa_strerror (error_number)); return; } // Run mainloop until stream is ready ///////////////////////////////////// pa_stream_state_t stream_state; stream_state = pa_stream_get_state (device.stream); while (stream_state != PA_STREAM_READY) { stream_state = pa_stream_get_state (device.stream); if (stream_state == PA_STREAM_READY) break; else if (! PA_STREAM_IS_GOOD (stream_state)) { error_number = pa_context_errno (device.context); fprintf (stderr, "Stream state is not good: %s\n", pa_strerror (error_number)); return; } else fprintf (stderr, "PulseAudio stream state is %d\n", stream_state); pa_threaded_mainloop_wait (device.mainloop); } pa_threaded_mainloop_unlock (device.mainloop); fprintf (stderr, "PulseAudio should be connected\n"); return; }
/*
 * Initialize the PulseAudio backend: create the threaded mainloop, property
 * list and context, connect to the server, start the mainloop thread, and
 * block (under the mainloop lock) until the state callback reports the
 * connection outcome.  On success the backend's vtable pointers in `si`
 * are filled in and 0 is returned; on failure everything allocated so far
 * is torn down via destroy_pa() and a SoundIoError code is returned.
 *
 * NOTE(review): sipa->ready_flag and sipa->connection_err are presumably
 * set by context_state_callback, which must also signal the mainloop to
 * wake the wait loop — confirm against the callback's definition.
 */
int soundio_pulseaudio_init(struct SoundIoPrivate *si) {
    struct SoundIo *soundio = &si->pub;
    struct SoundIoPulseAudio *sipa = &si->backend_data.pulseaudio;

    /* Request an initial device scan on the first flush_events. */
    sipa->device_scan_queued = true;

    sipa->main_loop = pa_threaded_mainloop_new();
    if (!sipa->main_loop) {
        destroy_pa(si);
        return SoundIoErrorNoMem;
    }

    pa_mainloop_api *main_loop_api = pa_threaded_mainloop_get_api(sipa->main_loop);

    sipa->props = pa_proplist_new();
    if (!sipa->props) {
        destroy_pa(si);
        return SoundIoErrorNoMem;
    }

    sipa->pulse_context = pa_context_new_with_proplist(main_loop_api, soundio->app_name, sipa->props);
    if (!sipa->pulse_context) {
        destroy_pa(si);
        return SoundIoErrorNoMem;
    }

    /* Callbacks must be installed before connecting so no event is missed. */
    pa_context_set_subscribe_callback(sipa->pulse_context, subscribe_callback, si);
    pa_context_set_state_callback(sipa->pulse_context, context_state_callback, si);

    int err = pa_context_connect(sipa->pulse_context, NULL, (pa_context_flags_t)0, NULL);
    if (err) {
        destroy_pa(si);
        return SoundIoErrorInitAudioBackend;
    }

    if (pa_threaded_mainloop_start(sipa->main_loop)) {
        destroy_pa(si);
        return SoundIoErrorNoMem;
    }

    pa_threaded_mainloop_lock(sipa->main_loop);

    /* Block until the state callback flags the connection as settled
       (either ready or failed with connection_err set). */
    while (!sipa->ready_flag)
        pa_threaded_mainloop_wait(sipa->main_loop);

    if (sipa->connection_err) {
        /* Unlock before destroy_pa(): it stops/frees the mainloop. */
        pa_threaded_mainloop_unlock(sipa->main_loop);
        destroy_pa(si);
        return sipa->connection_err;
    }

    if ((err = subscribe_to_events(si))) {
        pa_threaded_mainloop_unlock(sipa->main_loop);
        destroy_pa(si);
        return err;
    }

    pa_threaded_mainloop_unlock(sipa->main_loop);

    /* Wire up the backend vtable. */
    si->destroy = destroy_pa;
    si->flush_events = flush_events_pa;
    si->wait_events = wait_events_pa;
    si->wakeup = wakeup_pa;
    si->force_device_scan = force_device_scan_pa;

    si->outstream_open = outstream_open_pa;
    si->outstream_destroy = outstream_destroy_pa;
    si->outstream_start = outstream_start_pa;
    si->outstream_begin_write = outstream_begin_write_pa;
    si->outstream_end_write = outstream_end_write_pa;
    si->outstream_clear_buffer = outstream_clear_buffer_pa;
    si->outstream_pause = outstream_pause_pa;
    si->outstream_get_latency = outstream_get_latency_pa;

    si->instream_open = instream_open_pa;
    si->instream_destroy = instream_destroy_pa;
    si->instream_start = instream_start_pa;
    si->instream_begin_read = instream_begin_read_pa;
    si->instream_end_read = instream_end_read_pa;
    si->instream_pause = instream_pause_pa;
    si->instream_get_latency = instream_get_latency_pa;

    return 0;
}
/**
 * Create a PulseAudio context on the existing mainloop, connect it to the
 * server chosen by ChooseHost(), and wait until the connection is ready.
 * Finally kicks off an asynchronous server-info query.
 *
 * NOTE(review): uses pa_threaded_mainloop_wait(), so the caller is expected
 * to hold the mainloop lock when invoking this — confirm at the call sites.
 *
 * @return true when the context reached PA_CONTEXT_READY, false otherwise
 *         (including when a context already existed, which is treated as an
 *         error and the stale context is unref'd).
 */
bool AudioOutputPulseAudio::ContextConnect(void)
{
    QString fn_log_tag = "ContextConnect, ";
    if (pcontext)
    {
        /* A leftover context indicates a programming error; drop it. */
        VBERROR(fn_log_tag + "context appears to exist, but shouldn't (yet)");
        pa_context_unref(pcontext);
        pcontext = NULL;
        return false;
    }
    pcontext = pa_context_new(pa_threaded_mainloop_get_api(mainloop), "MythTV");
    if (!pcontext)
    {
        VBERROR(fn_log_tag + "failed to acquire new context");
        return false;
    }
    /* The state callback signals the mainloop, waking the wait loop below. */
    pa_context_set_state_callback(pcontext, ContextStateCallback, this);

    char *pulse_host = ChooseHost();
    int chk = pa_context_connect(
        pcontext, pulse_host, (pa_context_flags_t)0, NULL);
    /* NOTE(review): if ChooseHost() allocates with new[] this should be
       delete[] — confirm against ChooseHost()'s definition. */
    delete(pulse_host);

    if (chk < 0)
    {
        VBERROR(fn_log_tag + QString("context connect failed: %1")
                .arg(pa_strerror(pa_context_errno(pcontext))));
        return false;
    }

    /* Spin on the context state, sleeping on the mainloop between checks. */
    bool connected = false;
    pa_context_state_t state = pa_context_get_state(pcontext);
    for (; !connected; state = pa_context_get_state(pcontext))
    {
        switch(state)
        {
            case PA_CONTEXT_READY:
                VBAUDIO(fn_log_tag +"context connection ready");
                connected = true;
                continue;

            case PA_CONTEXT_FAILED:
            case PA_CONTEXT_TERMINATED:
                VBERROR(fn_log_tag +
                        QString("context connection failed or terminated: %1")
                        .arg(pa_strerror(pa_context_errno(pcontext))));
                return false;

            default:
                VBAUDIO(fn_log_tag + "waiting for context connection ready");
                pa_threaded_mainloop_wait(mainloop);
                break;
        }
    }

    /* Fire-and-forget server info query; result arrives via the callback. */
    pa_operation *op =
        pa_context_get_server_info(pcontext, ServerInfoCallback, this);
    if (op)
        pa_operation_unref(op);
    else
        VBERROR(fn_log_tag + "failed to get PulseAudio server info");

    return true;
}
/**
 * Construct the PulseAudio device backend: start a threaded mainloop,
 * connect a context to the server, wait for the connection, then query
 * server/source/sink information and subscribe to device-change events.
 *
 * Every failure path fully unwinds what was created so far and leaves
 * d->m_mainLoop / d->m_context as nullptr, which later methods can use
 * to detect an unusable backend.
 */
AudioDevPulseAudio::AudioDevPulseAudio(QObject *parent):
    AudioDev(parent)
{
    this->d = new AudioDevPulseAudioPrivate(this);

    // Create a threaded main loop for PulseAudio
    this->d->m_mainLoop = pa_threaded_mainloop_new();

    if (!this->d->m_mainLoop)
        return;

    // Start main loop.
    if (pa_threaded_mainloop_start(this->d->m_mainLoop) != 0) {
        pa_threaded_mainloop_free(this->d->m_mainLoop);
        this->d->m_mainLoop = nullptr;

        return;
    }

    // All context/operation calls below run under the mainloop lock.
    pa_threaded_mainloop_lock(this->d->m_mainLoop);

    // Get main loop abstration layer.
    auto mainLoopApi = pa_threaded_mainloop_get_api(this->d->m_mainLoop);

    if (!mainLoopApi) {
        pa_threaded_mainloop_unlock(this->d->m_mainLoop);
        pa_threaded_mainloop_stop(this->d->m_mainLoop);
        pa_threaded_mainloop_free(this->d->m_mainLoop);
        this->d->m_mainLoop = nullptr;

        return;
    }

    // Get a PulseAudio context.
    this->d->m_context = pa_context_new(mainLoopApi,
                                        QCoreApplication::applicationName()
                                            .toStdString()
                                            .c_str());

    if (!this->d->m_context) {
        pa_threaded_mainloop_unlock(this->d->m_mainLoop);
        pa_threaded_mainloop_stop(this->d->m_mainLoop);
        pa_threaded_mainloop_free(this->d->m_mainLoop);
        this->d->m_mainLoop = nullptr;

        return;
    }

    // We need to set a state callback in order to connect to the server.
    pa_context_set_state_callback(this->d->m_context,
                                  AudioDevPulseAudioPrivate::contextStateCallbackInit,
                                  this);

    // Connect to PulseAudio server.
    if (pa_context_connect(this->d->m_context,
                           nullptr,
                           PA_CONTEXT_NOFLAGS,
                           nullptr) < 0) {
        pa_context_unref(this->d->m_context);
        this->d->m_context = nullptr;
        pa_threaded_mainloop_unlock(this->d->m_mainLoop);
        pa_threaded_mainloop_stop(this->d->m_mainLoop);
        pa_threaded_mainloop_free(this->d->m_mainLoop);
        this->d->m_mainLoop = nullptr;

        return;
    }

    // Terminal states we wait for below; the init state callback is
    // expected to signal the mainloop on each state change.
    static const QList<pa_context_state_t> expectedStates = {
        PA_CONTEXT_READY,
        PA_CONTEXT_FAILED,
        PA_CONTEXT_TERMINATED
    };

    pa_context_state_t state;

    // Wait until the connection to the server is stablished.
    forever {
        state = pa_context_get_state(this->d->m_context);

        if (expectedStates.contains(state))
            break;

        pa_threaded_mainloop_wait(this->d->m_mainLoop);
    }

    if (state != PA_CONTEXT_READY) {
        pa_context_disconnect(this->d->m_context);
        pa_context_unref(this->d->m_context);
        this->d->m_context = nullptr;
        pa_threaded_mainloop_unlock(this->d->m_mainLoop);
        pa_threaded_mainloop_stop(this->d->m_mainLoop);
        pa_threaded_mainloop_free(this->d->m_mainLoop);
        this->d->m_mainLoop = nullptr;

        return;
    }

    // Get server information.
    auto operation =
        pa_context_get_server_info(this->d->m_context,
                                   AudioDevPulseAudioPrivate::serverInfoCallback,
                                   this);

    // Each query below blocks until its callback completes the operation.
    while (pa_operation_get_state(operation) == PA_OPERATION_RUNNING)
        pa_threaded_mainloop_wait(this->d->m_mainLoop);

    pa_operation_unref(operation);

    // Get sources information.
    operation =
        pa_context_get_source_info_list(this->d->m_context,
                                        AudioDevPulseAudioPrivate::sourceInfoCallback,
                                        this);

    while (pa_operation_get_state(operation) == PA_OPERATION_RUNNING)
        pa_threaded_mainloop_wait(this->d->m_mainLoop);

    pa_operation_unref(operation);

    // Get sinks information.
    operation =
        pa_context_get_sink_info_list(this->d->m_context,
                                      AudioDevPulseAudioPrivate::sinkInfoCallback,
                                      this);

    while (pa_operation_get_state(operation) == PA_OPERATION_RUNNING)
        pa_threaded_mainloop_wait(this->d->m_mainLoop);

    pa_operation_unref(operation);

    // Subscribe to hot-plug style device/server change notifications.
    pa_context_set_subscribe_callback(this->d->m_context,
                                      AudioDevPulseAudioPrivate::deviceUpdateCallback,
                                      this);
    pa_operation_unref(pa_context_subscribe(this->d->m_context,
                                            pa_subscription_mask_t(PA_SUBSCRIPTION_MASK_SINK
                                                                   | PA_SUBSCRIPTION_MASK_SOURCE
                                                                   | PA_SUBSCRIPTION_MASK_SERVER),
                                            nullptr,
                                            this));
    pa_threaded_mainloop_unlock(this->d->m_mainLoop);
}