/*
 * Initialize PulseAudio: create the mainloop and context, wait until the
 * context becomes ready, then create and connect the recording stream.
 *
 * Returns 0 on success, -1 (or the pa error code) on failure.
 */
static int init_pa(recorder_context_t *rctx)
{
    pa_mainloop_api *pa_mlapi;
    pa_proplist *ctx_properties = pa_proplist_new();
    pa_proplist *stream_properties = pa_proplist_new();
    int retval = 0;

    rctx->pa_ml = pa_mainloop_new();
    pa_mlapi = pa_mainloop_get_api(rctx->pa_ml);
    rctx->pa_ctx = pa_context_new_with_proplist(pa_mlapi, "NoApp recorder",
                                                ctx_properties);
    pa_context_connect(rctx->pa_ctx, NULL, 0, NULL);

    /* pa_state_cb flips pa_ready to 1 (ready) or 2 (failed/terminated). */
    rctx->pa_ready = 0;
    pa_context_set_state_callback(rctx->pa_ctx, pa_state_cb, rctx);
    while (rctx->pa_ready == 0) {
        pa_mainloop_iterate(rctx->pa_ml, 1, NULL);
    }
    if (rctx->pa_ready == 2) {
        retval = -1;
        goto exit;
    }

    rctx->recording_stream = pa_stream_new_with_proplist(rctx->pa_ctx,
                                                         "NoApp recorder",
                                                         &rctx->pa_ss, NULL,
                                                         stream_properties);
    if (!rctx->recording_stream) {
        /* BUG FIX: the original passed a NULL stream to
         * pa_stream_connect_record() on creation failure. */
        Log(LOG_ERR, "pa_stream_new_with_proplist failed\n");
        retval = -1;
        goto exit;
    }

    retval = pa_stream_connect_record(rctx->recording_stream, NULL, NULL, 0);
    if (retval < 0) {
        /* BUG FIX: the message said "playback" although this connects a
         * record stream. */
        Log(LOG_ERR, "pa_stream_connect_record failed\n");
        goto exit;
    }

exit:
    /* BUG FIX: both proplists leaked on every path.  PulseAudio copies
     * proplists on context/stream creation, so they can be freed here. */
    pa_proplist_free(ctx_properties);
    pa_proplist_free(stream_properties);
    return retval;
}
/*
 * JNI entry point: create a new PulseAudio stream on the context handle 'c'
 * named after the Java string 'server'.
 *
 * Returns the stream pointer as a jlong handle, or 0 on failure.
 */
JNIEXPORT jlong JNICALL
Java_com_harrcharr_pulse_Stream_JNINewStream(
        JNIEnv *jenv, jclass jcls, jlong c, jstring server)
{
    pa_sample_spec ss;
    pa_stream *stream;

    ss.channels = 1;
    ss.format = PA_SAMPLE_FLOAT32;
    ss.rate = 25;

    pa_proplist *p = pa_proplist_new();
    pa_proplist_sets(p, PA_PROP_APPLICATION_NAME, "Reverb PulseAudio Remote");

    const char *sname = (*jenv)->GetStringUTFChars(jenv, server, NULL);
    if (sname == NULL) {
        pa_proplist_free(p); /* BUG FIX: proplist leaked on the OOM path */
        return 0;            /* BUG FIX: NULL returned from a jlong function;
                              * OutOfMemoryError already thrown */
    }

    if (!(stream = pa_stream_new_with_proplist((pa_context *)(intptr_t) c,
                                               sname, &ss, NULL, p))) {
        LOGE("Failed to create new stream");
        stream = NULL;
    }

    /* BUG FIX: the proplist leaked on every call; pa_stream_new_with_proplist
     * copies it, so it is safe to free immediately. */
    pa_proplist_free(p);
    (*jenv)->ReleaseStringUTFChars(jenv, server, sname);

    /* BUG FIX: round-trip the pointer through intptr_t instead of relying
     * on an implicit pointer-to-jlong conversion. */
    return (jlong)(intptr_t) stream;
}
/* Compatibility shim: pa_stream_new() is just the proplist variant called
 * with an empty, throwaway proplist. */
APULSE_EXPORT
pa_stream *
pa_stream_new(pa_context *c, const char *name, const pa_sample_spec *ss,
              const pa_channel_map *map)
{
    trace_info_f("F %s c=%p, name=%s, ss=%p, map=%p\n", __func__, c, name, ss,
                 map);

    pa_proplist *props = pa_proplist_new();
    pa_stream *stream = pa_stream_new_with_proplist(c, name, ss, map, props);
    pa_proplist_free(props);

    return stream;
}
/*
 * (Re)create the global peak-monitoring record stream 's' on the given
 * context.  Requires server protocol version >= 13 (PEAK_DETECT support).
 *
 * Returns 0 on success, -1/-2/-3 on failure.
 */
static int m_pa_stream_connect(pa_context *pa_ctx)
{
    if (pa_context_get_server_protocol_version(pa_ctx) < 13) {
        return -1;
    }
    printf("server version: %d\n",
           pa_context_get_server_protocol_version(pa_ctx));

    /* Tear down any previous stream before creating a new one. */
    if (s) {
        pa_stream_disconnect(s);
        pa_stream_unref(s);
        s = NULL; /* BUG FIX: don't leave a dangling pointer if creation
                   * below fails */
    }

    pa_proplist *proplist;
    pa_buffer_attr attr;
    pa_sample_spec ss;
    int res;

    /* One float per fragment at 25 Hz: peak levels, not audible audio. */
    ss.channels = 1;
    ss.format = PA_SAMPLE_FLOAT32;
    ss.rate = 25;

    memset(&attr, 0, sizeof(attr));
    attr.fragsize = sizeof(float);
    attr.maxlength = (uint32_t) -1;

    proplist = pa_proplist_new();
    pa_proplist_sets(proplist, PA_PROP_APPLICATION_ID, "Deepin Sound Settings");

    if (!(s = pa_stream_new_with_proplist(pa_ctx, "Deepin Sound Settings",
                                          &ss, NULL, proplist))) {
        fprintf(stderr, "pa_stream_new error\n");
        pa_proplist_free(proplist); /* BUG FIX: proplist leaked on this path */
        return -2;
    }
    pa_proplist_free(proplist);

    pa_stream_set_read_callback(s, on_monitor_read_callback, NULL);
    pa_stream_set_suspended_callback(s, on_monitor_suspended_callback, NULL);

    res = pa_stream_connect_record(s, NULL, &attr,
                                   (pa_stream_flags_t)
                                   (PA_STREAM_DONT_MOVE
                                    | PA_STREAM_PEAK_DETECT
                                    | PA_STREAM_ADJUST_LATENCY));
    if (res < 0) {
        fprintf(stderr, "Failed to connect monitoring stream\n");
        return -3;
    }
    return 0;
}
/* * Create a new pulse audio stream and connect to it * * Return a negative value on error */ static int pulse_connect_stream(struct pulse_data *data) { pa_sample_spec spec; spec.format = data->format; spec.rate = data->samples_per_sec; spec.channels = get_audio_channels(data->speakers); if (!pa_sample_spec_valid(&spec)) { blog(LOG_ERROR, "pulse-input: Sample spec is not valid"); return -1; } data->bytes_per_frame = pa_frame_size(&spec); blog(LOG_DEBUG, "pulse-input: %u bytes per frame", (unsigned int) data->bytes_per_frame); pa_buffer_attr attr; attr.fragsize = get_buffer_size(data, 250); attr.maxlength = (uint32_t) -1; attr.minreq = (uint32_t) -1; attr.prebuf = (uint32_t) -1; attr.tlength = (uint32_t) -1; data->stream = pa_stream_new_with_proplist(data->context, obs_source_getname(data->source), &spec, NULL, data->props); if (!data->stream) { blog(LOG_ERROR, "pulse-input: Unable to create stream"); return -1; } pa_stream_flags_t flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_ADJUST_LATENCY; if (pa_stream_connect_record(data->stream, NULL, &attr, flags) < 0) { blog(LOG_ERROR, "pulse-input: Unable to connect to stream"); return -1; } for (;;) { pulse_iterate(data); pa_stream_state_t state = pa_stream_get_state(data->stream); if (state == PA_STREAM_READY) { blog(LOG_DEBUG, "pulse-input: Stream ready"); break; } if (!PA_STREAM_IS_GOOD(state)) { blog(LOG_ERROR, "pulse-input: Stream connect failed"); return -1; } } return 0; }
/* Create a new stream on the shared pulse context under the mainloop lock.
 * Returns NULL when the context is not ready or creation fails. */
pa_stream* pulse_stream_new(const char* name, const pa_sample_spec* ss,
                            const pa_channel_map* map)
{
    pa_stream *stream = NULL;

    if (pulse_context_ready() >= 0) {
        pulse_lock();

        pa_proplist *props = pulse_properties();
        stream = pa_stream_new_with_proplist(pulse_context, name, ss, map,
                                             props);
        pa_proplist_free(props);

        pulse_unlock();
    }

    return stream;
}
/* Create a capture stream on 'context' and block on the threaded mainloop
 * until it reaches PA_STREAM_READY.  Returns the connected stream, or NULL
 * on any failure (the stream reference is dropped on error). */
static pa_stream *connect_record_stream(const char *device_name,
        pa_threaded_mainloop *loop, pa_context *context,
        pa_stream_flags_t flags, pa_buffer_attr *attr, pa_sample_spec *spec,
        pa_channel_map *chanmap)
{
    pa_stream *stream = pa_stream_new_with_proplist(context, "Capture Stream",
                                                    spec, chanmap, prop_filter);
    if (!stream) {
        ERR("pa_stream_new_with_proplist() failed: %s\n",
            pa_strerror(pa_context_errno(context)));
        return NULL;
    }

    /* The state callback signals the mainloop on every state change. */
    pa_stream_set_state_callback(stream, stream_state_callback, loop);

    if (pa_stream_connect_record(stream, device_name, attr, flags) < 0) {
        ERR("Stream did not connect: %s\n",
            pa_strerror(pa_context_errno(context)));
        pa_stream_unref(stream);
        return NULL;
    }

    for (;;) {
        pa_stream_state_t state = pa_stream_get_state(stream);
        if (state == PA_STREAM_READY)
            break;
        if (!PA_STREAM_IS_GOOD(state)) {
            ERR("Stream did not get ready: %s\n",
                pa_strerror(pa_context_errno(context)));
            pa_stream_unref(stream);
            return NULL;
        }
        pa_threaded_mainloop_wait(loop);
    }

    /* Detach the callback now that the wait loop no longer needs it. */
    pa_stream_set_state_callback(stream, NULL, NULL);
    return stream;
}
/*
 * JNI wrapper for pa_stream_new_with_proplist().  All pa_* arguments are
 * native pointers passed as jlong handles; 'name' may be NULL.
 *
 * Returns the new stream as a jlong handle, or 0 on failure.
 */
JNIEXPORT jlong JNICALL
Java_org_jitsi_impl_neomedia_pulseaudio_PA_stream_1new_1with_1proplist
    (JNIEnv *env, jclass clazz, jlong c, jstring name, jlong ss, jlong map,
     jlong p)
{
    const char *nameChars
        = name ? (*env)->GetStringUTFChars(env, name, NULL) : NULL;
    pa_stream *stream;

    if ((*env)->ExceptionCheck(env))
        stream = NULL;
    else
    {
        stream
            = pa_stream_new_with_proplist(
                    (pa_context *) (intptr_t) c,
                    nameChars,
                    (const pa_sample_spec *) (intptr_t) ss,
                    (const pa_channel_map *) (intptr_t) map,
                    (pa_proplist *) (intptr_t) p);
        /* BUG FIX: the original unconditionally called
         * ReleaseStringUTFChars even when 'name' was NULL and no chars
         * were ever obtained — undefined behavior per the JNI spec. */
        if (name)
            (*env)->ReleaseStringUTFChars(env, name, nameChars);
    }
    return (intptr_t) stream;
}
/* Create the PulseAudio record stream for this source based on 'caps'
 * (GStreamer 0.10-era ring-buffer API).  Returns TRUE on success,
 * FALSE on failure (with pulsesrc->stream destroyed). */
static gboolean
gst_pulsesrc_create_stream (GstPulseSrc * pulsesrc, GstCaps * caps)
{
  pa_channel_map channel_map;
  GstStructure *s;
  gboolean need_channel_layout = FALSE;
  GstRingBufferSpec spec;
  const gchar *name;

  memset (&spec, 0, sizeof (GstRingBufferSpec));
  spec.latency_time = GST_SECOND;

  if (!gst_ring_buffer_parse_caps (&spec, caps)) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, SETTINGS,
        ("Can't parse caps."), (NULL));
    goto fail;
  }
  /* Keep the refcount of the caps at 1 to make them writable */
  gst_caps_unref (spec.caps);

  if (!gst_pulse_fill_sample_spec (&spec, &pulsesrc->sample_spec)) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, SETTINGS,
        ("Invalid sample specification."), (NULL));
    goto fail;
  }

  pa_threaded_mainloop_lock (pulsesrc->mainloop);

  if (!pulsesrc->context) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("Bad context"), (NULL));
    goto unlock_and_fail;
  }

  /* Without an explicit channel-layout in the caps, fall back to the
   * standard mono/stereo maps and let the server choose for anything
   * with more channels. */
  s = gst_caps_get_structure (caps, 0);
  if (!gst_structure_has_field (s, "channel-layout") ||
      !gst_pulse_gst_to_channel_map (&channel_map, &spec)) {
    if (spec.channels == 1)
      pa_channel_map_init_mono (&channel_map);
    else if (spec.channels == 2)
      pa_channel_map_init_stereo (&channel_map);
    else
      need_channel_layout = TRUE;
  }

  name = "Record Stream";
  if (pulsesrc->proplist) {
    if (!(pulsesrc->stream = pa_stream_new_with_proplist (pulsesrc->context,
                name, &pulsesrc->sample_spec,
                (need_channel_layout) ? NULL : &channel_map,
                pulsesrc->proplist))) {
      GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
          ("Failed to create stream: %s",
              pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
      goto unlock_and_fail;
    }
  } else if (!(pulsesrc->stream = pa_stream_new (pulsesrc->context,
              name, &pulsesrc->sample_spec,
              (need_channel_layout) ? NULL : &channel_map))) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("Failed to create stream: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }

  if (need_channel_layout) {
    /* Adopt whatever channel map the server actually chose. */
    const pa_channel_map *m = pa_stream_get_channel_map (pulsesrc->stream);

    gst_pulse_channel_map_to_gst (m, &spec);
    caps = spec.caps;
  }

  GST_DEBUG_OBJECT (pulsesrc, "Caps are %" GST_PTR_FORMAT, caps);

  pa_stream_set_state_callback (pulsesrc->stream,
      gst_pulsesrc_stream_state_cb, pulsesrc);
  pa_stream_set_read_callback (pulsesrc->stream,
      gst_pulsesrc_stream_request_cb, pulsesrc);
  pa_stream_set_underflow_callback (pulsesrc->stream,
      gst_pulsesrc_stream_underflow_cb, pulsesrc);
  pa_stream_set_overflow_callback (pulsesrc->stream,
      gst_pulsesrc_stream_overflow_cb, pulsesrc);
  pa_stream_set_latency_update_callback (pulsesrc->stream,
      gst_pulsesrc_stream_latency_update_cb, pulsesrc);

  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

  return TRUE;

unlock_and_fail:
  gst_pulsesrc_destroy_stream (pulsesrc);

  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

fail:
  return FALSE;
}
/*
 * Context state callback for the tunnel sink.
 *
 * When the remote context becomes ready, create the playback stream
 * (optionally advertising a single transcoded format) and connect it to
 * the remote sink.  On context failure or termination, stop the module's
 * thread mainloop with TUNNEL_THREAD_FAILED_MAINLOOP.
 */
static void context_state_cb(pa_context *c, void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    switch (pa_context_get_state(c)) {
        case PA_CONTEXT_UNCONNECTED:
        case PA_CONTEXT_CONNECTING:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
            break;
        case PA_CONTEXT_READY: {
            pa_proplist *proplist;
            pa_buffer_attr bufferattr;
            pa_usec_t requested_latency;
            char *username = pa_get_user_name_malloc();
            char *hostname = pa_get_host_name_malloc();
            /* TODO: old tunnel put here the remote sink_name into stream name
             * e.g. 'Null Output for lynxis@lazus' */
            char *stream_name = pa_sprintf_malloc(_("Tunnel for %s@%s"),
                                                  username, hostname);
            pa_xfree(hostname);
            pa_xfree(username);

            pa_log_debug("Connection successful. Creating stream.");
            pa_assert(!u->stream);
            proplist = tunnel_new_proplist(u);

            if (u->transcode.encoding != -1) {
                /* Advertise a single compressed format that mirrors the
                 * sink's sample spec and channel map. */
                unsigned int n_formats = 1;
                pa_format_info *formats[1];
                formats[0] = pa_format_info_new();
                formats[0]->encoding = u->transcode.encoding;
                pa_format_info_set_sample_format(formats[0], u->sink->sample_spec.format);
                pa_format_info_set_rate(formats[0], u->sink->sample_spec.rate);
                pa_format_info_set_channels(formats[0], u->sink->sample_spec.channels);
                pa_format_info_set_channel_map(formats[0], &u->sink->channel_map);
                pa_transcode_set_format_info(&u->transcode, formats[0]);

                u->stream = pa_stream_new_extended(u->context,
                                                   stream_name,
                                                   formats,
                                                   n_formats,
                                                   proplist);
            } else
                u->stream = pa_stream_new_with_proplist(u->context,
                                                        stream_name,
                                                        &u->sink->sample_spec,
                                                        &u->sink->channel_map,
                                                        proplist);
            pa_proplist_free(proplist);
            pa_xfree(stream_name);

            if (!u->stream) {
                pa_log_error("Could not create a stream.");
                u->thread_mainloop_api->quit(u->thread_mainloop_api, TUNNEL_THREAD_FAILED_MAINLOOP);
                return;
            }

            /* Size the remote buffer to match the latency requested of the
             * local sink. */
            requested_latency = pa_sink_get_requested_latency_within_thread(u->sink);
            if (requested_latency == (pa_usec_t) -1)
                requested_latency = u->sink->thread_info.max_latency;

            reset_bufferattr(&bufferattr);
            bufferattr.tlength = pa_usec_to_bytes(requested_latency, &u->sink->sample_spec);

            pa_stream_set_state_callback(u->stream, stream_state_cb, userdata);
            pa_stream_set_buffer_attr_callback(u->stream, stream_changed_buffer_attr_cb, userdata);
            if (pa_stream_connect_playback(u->stream,
                                           u->remote_sink_name,
                                           &bufferattr,
                                           PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_DONT_MOVE | PA_STREAM_START_CORKED | PA_STREAM_AUTO_TIMING_UPDATE,
                                           NULL,
                                           NULL) < 0) {
                pa_log_error("Could not connect stream.");
                u->thread_mainloop_api->quit(u->thread_mainloop_api, TUNNEL_THREAD_FAILED_MAINLOOP);
                /* BUG FIX: the original fell through and marked the tunnel
                 * as connected even though connect failed and the mainloop
                 * was already asked to quit. */
                return;
            }
            u->connected = true;
            break;
        }
        case PA_CONTEXT_FAILED:
            pa_log_debug("Context failed: %s.", pa_strerror(pa_context_errno(u->context)));
            u->connected = false;
            u->thread_mainloop_api->quit(u->thread_mainloop_api, TUNNEL_THREAD_FAILED_MAINLOOP);
            break;
        case PA_CONTEXT_TERMINATED:
            pa_log_debug("Context terminated.");
            u->connected = false;
            u->thread_mainloop_api->quit(u->thread_mainloop_api, TUNNEL_THREAD_FAILED_MAINLOOP);
            break;
    }
}
// Pre-upload ("cache") the sound sample identified by 'id' to the PulseAudio
// server via pa_stream_connect_upload(), so it can later be triggered as a
// low-latency event sound.  Blocks on the threaded mainloop until the upload
// stream terminates or fails.
void Sounds::cache(const QString& id) {
  SoundFileInfo *info = m_files[id];

  if (!info) {
    return;
  }

  if (info->path().isEmpty()) {
    return;
  }

  if (!m_ctx) {
    return;
  }

  FileReader h(info->path());

  pa_sample_spec *spec = h.sampleSpec();
  if (!spec) {
    qmlInfo(this) << "Failed to get a sample spec";
    return;
  }

  if (!pa_sample_spec_valid(spec)) {
    qmlInfo(this) << "Failed to get a valid sample spec";
    return;
  }

  // First we set the file duration
  info->setDuration(pa_bytes_to_usec(h.size(), spec));

  // Describe the sample as an event sound so the server can route/volume it
  // appropriately.
  pa_proplist *prop = pa_proplist_new();
  pa_proplist_sets(prop, PA_PROP_MEDIA_ROLE, "event");
#ifdef SAILFISH
  pa_proplist_sets(prop, PA_PROP_MEDIA_NAME, "camera-event");
#else
  pa_proplist_sets(prop, PA_PROP_MEDIA_NAME, qPrintable(id));
#endif
  pa_proplist_sets(prop, PA_PROP_EVENT_ID, qPrintable(id));
  pa_proplist_sets(prop, PA_PROP_MEDIA_FILENAME, qPrintable(info->path()));

#ifdef SAILFISH
  // Sailfish: pretend to be ngfd so the feedback daemon's stream policy
  // applies to our upload.
  pa_proplist_sets(prop, PA_PROP_APPLICATION_PROCESS_BINARY, "ngfd");
  pa_stream *stream = pa_stream_new_with_proplist(m_ctx, "camera-event",
                                                  spec, NULL, prop);
#else
  pa_stream *stream = pa_stream_new_with_proplist(m_ctx, qPrintable(id),
                                                  spec, NULL, prop);
#endif

  pa_proplist_free(prop);

  if (!stream) {
    qmlInfo(this) << "Failed to create a pulse audio stream";
    return;
  }

  // streamStateCallback signals the mainloop; streamRequestCallback feeds
  // file data from 'h' as the server asks for it.
  pa_stream_set_state_callback(stream,
                               (pa_stream_notify_cb_t)streamStateCallback,
                               m_loop);
  pa_stream_set_write_callback(stream,
                               (pa_stream_request_cb_t)streamRequestCallback,
                               &h);

  pa_threaded_mainloop_lock(m_loop);

  if (pa_stream_connect_upload(stream, h.size()) < 0) {
    pa_stream_unref(stream);
    pa_threaded_mainloop_unlock(m_loop);
    qmlInfo(this) << "Failed to connect pulse audio stream";
    return;
  }

  // Wait for the upload to finish: TERMINATED means the whole sample was
  // accepted, FAILED aborts, anything else keeps waiting.
  while (true) {
    bool out = false;

    switch (pa_stream_get_state(stream)) {
    case PA_STREAM_FAILED:
      qmlInfo(this) << "Failed to connect our stream to pulse audio "
                    << pa_strerror(pa_context_errno(m_ctx));
      pa_stream_disconnect(stream);
      pa_stream_unref(stream);
      pa_threaded_mainloop_unlock(m_loop);
      return;

    case PA_STREAM_TERMINATED:
      pa_threaded_mainloop_unlock(m_loop);
      out = true;
      break;

    case PA_STREAM_READY:
    case PA_STREAM_UNCONNECTED:
    case PA_STREAM_CREATING:
      pa_threaded_mainloop_wait(m_loop);
      continue;
    }

    if (out) {
      break;
    }
  }

  pa_stream_unref(stream);
}
int main(int argc, const char *argv[]) { pa_mainloop *pa_ml = NULL; pa_mainloop_api *pa_mlapi = NULL; pa_operation *pa_op = NULL; pa_context *pa_ctx = NULL; int pa_ready = 0; int state = 0; pa_ml = pa_mainloop_new(); pa_mlapi = pa_mainloop_get_api(pa_ml); pa_ctx = pa_context_new(pa_mlapi, "deepin"); pa_context_connect(pa_ctx, NULL, 0, NULL); pa_context_set_state_callback(pa_ctx, pa_state_cb, &pa_ready); for (;;) { if (0 == pa_ready) { pa_mainloop_iterate(pa_ml, 1, NULL); continue; } if (2 == pa_ready) { pa_context_disconnect(pa_ctx); pa_context_unref(pa_ctx); pa_mainloop_free(pa_ml); return -1; } switch (state) { case 0: if (pa_context_get_server_protocol_version (pa_ctx) < 13) { return -1; } printf("server version: %d\n", pa_context_get_server_protocol_version(pa_ctx)); pa_stream *s = NULL; pa_proplist *proplist; pa_buffer_attr attr; pa_sample_spec ss; int res; char dev_name[40]; // pa_sample_spec ss.channels = 1; ss.format = PA_SAMPLE_FLOAT32; ss.rate = 25; // pa_buffer_attr memset(&attr, 0, sizeof(attr)); attr.fragsize = sizeof(float); attr.maxlength = (uint32_t) -1; // pa_proplist proplist = pa_proplist_new (); pa_proplist_sets (proplist, PA_PROP_APPLICATION_ID, "deepin.sound"); // create new stream if (!(s = pa_stream_new_with_proplist(pa_ctx, "Peak detect", &ss, NULL, proplist))) { fprintf(stderr, "pa_stream_new error\n"); return -2; } pa_proplist_free(proplist); /*pa_stream_set_monitor_stream(s, 26);*/ pa_stream_set_read_callback(s, on_monitor_read_callback, NULL); pa_stream_set_suspended_callback(s, on_monitor_suspended_callback, NULL); res = pa_stream_connect_record(s, NULL, &attr, (pa_stream_flags_t) (PA_STREAM_DONT_MOVE |PA_STREAM_PEAK_DETECT |PA_STREAM_ADJUST_LATENCY)); if (res < 0) { fprintf(stderr, "Failed to connect monitoring stream\n"); return -3; } state++; break; case 1: usleep(100); break; case 2: return 0; break; default: return -1; } pa_mainloop_iterate(pa_ml, 1, NULL); } return 0; }
/*
 * Create a stream from an array of pa_format_info descriptors.  Only the
 * first format is honored (TODO: multiple formats); missing fields fall
 * back to S16LE / 48000 Hz / 2 channels.
 */
APULSE_EXPORT
pa_stream *
pa_stream_new_extended(pa_context *c, const char *name,
                       pa_format_info *const *formats, unsigned int n_formats,
                       pa_proplist *p)
{
    trace_info_f("P %s c=%p, name=%s, formats=%p, n_formats=%u, p=%p\n",
                 __func__, c, name, formats, n_formats, p);

    // TODO: multiple formats?
    // take first format
    if (n_formats < 1) {
        trace_error("%s, no formats\n", __func__);
        return NULL;
    }

    /* Defaults used when the format proplist lacks a field. */
    pa_sample_spec ss = {
        .format = PA_SAMPLE_S16LE,
        .rate = 48000,
        .channels = 2,
    };

    const char *val;
    val = pa_proplist_gets(formats[0]->plist, PA_PROP_FORMAT_SAMPLE_FORMAT);
    if (val)
        ss.format = pa_sample_format_from_string(val);

    val = pa_proplist_gets(formats[0]->plist, PA_PROP_FORMAT_RATE);
    if (val)
        ss.rate = atoi(val);

    val = pa_proplist_gets(formats[0]->plist, PA_PROP_FORMAT_CHANNELS);
    if (val)
        ss.channels = atoi(val);

    return pa_stream_new_with_proplist(c, name, &ss, NULL, p);
}

/*
 * Allocate and register a new (unconnected) stream on context 'c' with the
 * given sample spec.  The stream starts with refcount 1 and synthetic
 * timing info stamped "now".  Returns NULL on allocation failure.
 */
APULSE_EXPORT
pa_stream *
pa_stream_new_with_proplist(pa_context *c, const char *name,
                            const pa_sample_spec *ss, const pa_channel_map *map,
                            pa_proplist *p)
{
    trace_info_f("F %s c=%p, name=%s, ss={.format=%d, .rate=%u, .channels=%u}, "
                 "map=%p, p=%p\n",
                 __func__, c, name, ss->format, ss->rate, ss->channels, map, p);

    pa_stream *s = calloc(1, sizeof(*s));
    if (!s)
        return NULL; /* BUG FIX: calloc() result was dereferenced unchecked */

    s->c = c;
    s->ref_cnt = 1;
    s->state = PA_STREAM_UNCONNECTED;
    s->ss = *ss;

    s->rb = ringbuffer_new(72 * 1024);  // TODO: figure out size
    s->peek_buffer = s->rb ? malloc(s->rb->end - s->rb->start) : NULL;
    if (!s->rb || !s->peek_buffer) {
        /* BUG FIX: both allocations were used unchecked.  NOTE(review): no
         * ringbuffer destructor is visible here, so a ring buffer without a
         * peek buffer is knowingly leaked on this cold path — confirm
         * against the ringbuffer API. */
        free(s->peek_buffer);
        free(s);
        return NULL;
    }

    /* Register only after all allocations succeeded, so the hash table
     * never holds a half-built stream. */
    s->idx = c->next_stream_idx++;
    g_hash_table_insert(c->streams_ht, GINT_TO_POINTER(s->idx), s);

    // fill initial values of s->timing_info
    gettimeofday(&s->timing_info.timestamp, NULL);
    s->timing_info.synchronized_clocks = 1;
    s->timing_info.sink_usec = 0;
    s->timing_info.source_usec = 0;
    s->timing_info.transport_usec = 0;
    s->timing_info.playing = 1;
    s->timing_info.write_index_corrupt = 0;
    s->timing_info.write_index = 0;
    s->timing_info.read_index_corrupt = 0;
    s->timing_info.read_index = 0;
    s->timing_info.configured_sink_usec = 0;
    s->timing_info.configured_source_usec = 0;
    s->timing_info.since_underrun = 0;

    return s;
}
/**
 * Setup a new stream based on the properties of the given audio_buf
 *
 * Builds the sample spec and an explicit channel map for the buffer's
 * format, creates the playback stream (tagged with a media role when the
 * proplist API is available), and connects it with fixed buffer metrics.
 */
static void
stream_setup(pa_audio_mode_t *pam, audio_buf_t *ab)
{
  pa_stream *s;
  char buf[100];
  int flags = 0;

#if PA_API_VERSION >= 12
  pa_proplist *pl;
  media_pipe_t *mp = ab->ab_mp;
#endif
  pa_channel_map map;
  pa_cvolume cv;

  memset(&pam->ss, 0, sizeof(pa_sample_spec));

  pam->ss.format = ab->ab_isfloat ? PA_SAMPLE_FLOAT32NE : PA_SAMPLE_S16NE;
  pam->ss.rate = ab->ab_samplerate;

  /* Translate the buffer's speaker layout into an explicit PA channel map. */
  switch(ab->ab_format) {
  case AM_FORMAT_PCM_STEREO:
    pam->ss.channels = 2;
    pa_channel_map_init_stereo(&map);
    break;

  case AM_FORMAT_PCM_5DOT0:
    pam->ss.channels = 5;
    pa_channel_map_init(&map);
    map.channels = 5;
    map.map[0] = PA_CHANNEL_POSITION_LEFT;
    map.map[1] = PA_CHANNEL_POSITION_RIGHT;
    map.map[2] = PA_CHANNEL_POSITION_CENTER;
    map.map[3] = PA_CHANNEL_POSITION_SIDE_LEFT;
    map.map[4] = PA_CHANNEL_POSITION_SIDE_RIGHT;
    break;

  case AM_FORMAT_PCM_5DOT1:
    pam->ss.channels = 6;
    pa_channel_map_init(&map);
    map.channels = 6;
    map.map[0] = PA_CHANNEL_POSITION_LEFT;
    map.map[1] = PA_CHANNEL_POSITION_RIGHT;
    map.map[2] = PA_CHANNEL_POSITION_CENTER;
    map.map[3] = PA_CHANNEL_POSITION_LFE;
    map.map[4] = PA_CHANNEL_POSITION_SIDE_LEFT;
    map.map[5] = PA_CHANNEL_POSITION_SIDE_RIGHT;
    break;

  case AM_FORMAT_PCM_7DOT1:
    pam->ss.channels = 8;
    pa_channel_map_init(&map);
    map.channels = 8;
    map.map[0] = PA_CHANNEL_POSITION_LEFT;
    map.map[1] = PA_CHANNEL_POSITION_RIGHT;
    map.map[2] = PA_CHANNEL_POSITION_CENTER;
    map.map[3] = PA_CHANNEL_POSITION_LFE;
    map.map[4] = PA_CHANNEL_POSITION_SIDE_LEFT;
    map.map[5] = PA_CHANNEL_POSITION_SIDE_RIGHT;
    map.map[6] = PA_CHANNEL_POSITION_REAR_LEFT;
    map.map[7] = PA_CHANNEL_POSITION_REAR_RIGHT;
    break;

  case AM_FORMAT_PCM_6DOT1:
    pam->ss.channels = 7;
    pa_channel_map_init(&map);
    map.channels = 7;
    map.map[0] = PA_CHANNEL_POSITION_LEFT;
    map.map[1] = PA_CHANNEL_POSITION_RIGHT;
    map.map[2] = PA_CHANNEL_POSITION_CENTER;
    map.map[3] = PA_CHANNEL_POSITION_LFE;
    map.map[4] = PA_CHANNEL_POSITION_SIDE_LEFT;
    map.map[5] = PA_CHANNEL_POSITION_SIDE_RIGHT;
    map.map[6] = PA_CHANNEL_POSITION_REAR_CENTER;
    break;

  default:
    /* Caller should never hand us an unknown layout. */
    abort();
  }

  TRACE(TRACE_DEBUG, "PA", "Created stream %s",
        pa_sample_spec_snprint(buf, sizeof(buf), &pam->ss));

#if PA_API_VERSION >= 12
  /* Tag the stream with a media role so the server can apply policy. */
  pl = pa_proplist_new();
  if(mp->mp_flags & MP_VIDEO)
    pa_proplist_sets(pl, PA_PROP_MEDIA_ROLE, "video");
  else
    pa_proplist_sets(pl, PA_PROP_MEDIA_ROLE, "music");

  s = pa_stream_new_with_proplist(pam->context, "Showtime playback",
                                  &pam->ss, &map, pl);
  pa_proplist_free(pl);
#else
  s = pa_stream_new(pam->context, "Showtime playback", &pam->ss, &map);
#endif

  pa_stream_set_state_callback(s, stream_state_callback, pam);
  pa_stream_set_write_callback(s, stream_write_callback, pam);

  flags |= PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_INTERPOLATE_TIMING;

  memset(&cv, 0, sizeof(cv));
  pa_cvolume_set(&cv, pam->ss.channels, pam->mastervol);

#if 1
  /* Fixed, fairly small buffer metrics (bytes). */
  pa_buffer_attr pba = {0};
  pba.fragsize = (uint32_t)-1;
  pba.maxlength = 16 * 1024;
  pba.minreq = 3 * 1024;
  pba.prebuf = 8 * 1024;
  pba.tlength = 12 * 1024;
#endif

  pa_stream_connect_playback(s, NULL, &pba, flags, &cv, NULL);

  /* Remember what we configured so reconfiguration can be skipped when
   * the next buffer matches. */
  pam->stream = s;
  pam->cur_rate = ab->ab_samplerate;
  pam->cur_format = ab->ab_format;
  pam->cur_isfloat = ab->ab_isfloat;
}
/* Create the PulseAudio record stream for this source based on *caps
 * (GStreamer 1.x audio ring-buffer API); rewrites *caps with the channel
 * map the server actually chose.  Returns TRUE on success. */
static gboolean
gst_pulsesrc_create_stream (GstPulseSrc * pulsesrc, GstCaps ** caps)
{
  pa_channel_map channel_map;
  const pa_channel_map *m;
  GstStructure *s;
  gboolean need_channel_layout = FALSE;
  GstAudioRingBufferSpec spec;
  const gchar *name;

  /* Without an explicit channel-mask, synthesize one for mono/stereo and
   * leave the choice to the server for more channels. */
  s = gst_caps_get_structure (*caps, 0);
  gst_structure_get_int (s, "channels", &spec.info.channels);
  /* NOTE(review): spec.info.channels is read here, but spec is zeroed below
   * and re-parsed from the caps — confirm this ordering is intended. */
  if (!gst_structure_has_field (s, "channel-mask")) {
    if (spec.info.channels == 1) {
      pa_channel_map_init_mono (&channel_map);
    } else if (spec.info.channels == 2) {
      gst_structure_set (s, "channel-mask", GST_TYPE_BITMASK,
          GST_AUDIO_CHANNEL_POSITION_MASK (FRONT_LEFT) |
          GST_AUDIO_CHANNEL_POSITION_MASK (FRONT_RIGHT), NULL);
      pa_channel_map_init_stereo (&channel_map);
    } else {
      need_channel_layout = TRUE;
      gst_structure_set (s, "channel-mask", GST_TYPE_BITMASK,
          G_GUINT64_CONSTANT (0), NULL);
    }
  }

  memset (&spec, 0, sizeof (GstAudioRingBufferSpec));
  spec.latency_time = GST_SECOND;
  if (!gst_audio_ring_buffer_parse_caps (&spec, *caps))
    goto invalid_caps;

  /* Keep the refcount of the caps at 1 to make them writable */
  gst_caps_unref (spec.caps);

  if (!need_channel_layout
      && !gst_pulse_gst_to_channel_map (&channel_map, &spec)) {
    need_channel_layout = TRUE;
    gst_structure_set (s, "channel-mask", GST_TYPE_BITMASK,
        G_GUINT64_CONSTANT (0), NULL);
    memset (spec.info.position, 0xff, sizeof (spec.info.position));
  }

  if (!gst_pulse_fill_sample_spec (&spec, &pulsesrc->sample_spec))
    goto invalid_spec;

  pa_threaded_mainloop_lock (pulsesrc->mainloop);

  if (!pulsesrc->context)
    goto bad_context;

  name = "Record Stream";
  if (pulsesrc->proplist) {
    if (!(pulsesrc->stream = pa_stream_new_with_proplist (pulsesrc->context,
                name, &pulsesrc->sample_spec,
                (need_channel_layout) ? NULL : &channel_map,
                pulsesrc->proplist)))
      goto create_failed;
  } else if (!(pulsesrc->stream = pa_stream_new (pulsesrc->context,
              name, &pulsesrc->sample_spec,
              (need_channel_layout) ? NULL : &channel_map)))
    goto create_failed;

  /* Adopt the channel map the server actually chose for the stream and
   * publish it back through *caps. */
  m = pa_stream_get_channel_map (pulsesrc->stream);
  gst_pulse_channel_map_to_gst (m, &spec);
  gst_audio_channel_positions_to_valid_order (spec.info.position,
      spec.info.channels);
  gst_caps_unref (*caps);
  *caps = gst_audio_info_to_caps (&spec.info);

  GST_DEBUG_OBJECT (pulsesrc, "Caps are %" GST_PTR_FORMAT, *caps);

  pa_stream_set_state_callback (pulsesrc->stream,
      gst_pulsesrc_stream_state_cb, pulsesrc);
  pa_stream_set_read_callback (pulsesrc->stream,
      gst_pulsesrc_stream_request_cb, pulsesrc);
  pa_stream_set_underflow_callback (pulsesrc->stream,
      gst_pulsesrc_stream_underflow_cb, pulsesrc);
  pa_stream_set_overflow_callback (pulsesrc->stream,
      gst_pulsesrc_stream_overflow_cb, pulsesrc);
  pa_stream_set_latency_update_callback (pulsesrc->stream,
      gst_pulsesrc_stream_latency_update_cb, pulsesrc);

  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

  return TRUE;

  /* ERRORS */
invalid_caps:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, SETTINGS,
        ("Can't parse caps."), (NULL));
    goto fail;
  }
invalid_spec:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, SETTINGS,
        ("Invalid sample specification."), (NULL));
    goto fail;
  }
bad_context:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("Bad context"), (NULL));
    goto unlock_and_fail;
  }
create_failed:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("Failed to create stream: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }
unlock_and_fail:
  {
    gst_pulsesrc_destroy_stream (pulsesrc);
    pa_threaded_mainloop_unlock (pulsesrc->mainloop);
  fail:
    return FALSE;
  }
}
/* Initialize the PulseAudio output driver: spin up the threaded mainloop,
 * connect the context, map the mpv sample format to a PA format, create the
 * playback stream, and wait for it to become ready.
 * Returns 0 on success, -1 on failure (everything torn down via uninit). */
static int init(struct ao *ao)
{
    struct pa_sample_spec ss;
    struct pa_channel_map map;
    pa_proplist *proplist = NULL;
    struct priv *priv = ao->priv;
    char *host = priv->cfg_host && priv->cfg_host[0] ? priv->cfg_host : NULL;
    char *sink = priv->cfg_sink && priv->cfg_sink[0] ? priv->cfg_sink : NULL;
    const char *version = pa_get_library_version();

    ao->per_application_mixer = true;

    priv->broken_pause = false;
    /* not sure which versions are affected, assume 0.9.11* to 0.9.14*
     * known bad: 0.9.14, 0.9.13
     * known good: 0.9.9, 0.9.10, 0.9.15
     * To test: pause, wait ca. 5 seconds, framestep and see if MPlayer
     * hangs somewhen. */
    if (strncmp(version, "0.9.1", 5) == 0 && version[5] >= '1'
        && version[5] <= '4') {
        MP_WARN(ao, "working around probably broken pause functionality,\n"
                    " see http://www.pulseaudio.org/ticket/440\n");
        priv->broken_pause = true;
    }

    if (!(priv->mainloop = pa_threaded_mainloop_new())) {
        MP_ERR(ao, "Failed to allocate main loop\n");
        goto fail;
    }

    if (!(priv->context = pa_context_new(pa_threaded_mainloop_get_api(
                                         priv->mainloop), PULSE_CLIENT_NAME))) {
        MP_ERR(ao, "Failed to allocate context\n");
        goto fail;
    }

    /* context_state_cb signals the mainloop on every state change. */
    pa_context_set_state_callback(priv->context, context_state_cb, ao);

    if (pa_context_connect(priv->context, host, 0, NULL) < 0)
        goto fail;

    pa_threaded_mainloop_lock(priv->mainloop);

    if (pa_threaded_mainloop_start(priv->mainloop) < 0)
        goto unlock_and_fail;

    /* Wait until the context is ready */
    pa_threaded_mainloop_wait(priv->mainloop);

    if (pa_context_get_state(priv->context) != PA_CONTEXT_READY)
        goto unlock_and_fail;

    ss.channels = ao->channels.num;
    ss.rate = ao->samplerate;

    ao->format = af_fmt_from_planar(ao->format);

    /* Linear scan of the format table; the AF_FORMAT_UNKNOWN sentinel at
     * the end means "fall back to the table's first (default) entry". */
    const struct format_map *fmt_map = format_maps;
    while (fmt_map->mp_format != ao->format) {
        if (fmt_map->mp_format == AF_FORMAT_UNKNOWN) {
            MP_VERBOSE(ao, "Unsupported format, using default\n");
            fmt_map = format_maps;
            break;
        }
        fmt_map++;
    }
    ao->format = fmt_map->mp_format;
    ss.format = fmt_map->pa_format;

    if (!pa_sample_spec_valid(&ss)) {
        MP_ERR(ao, "Invalid sample spec\n");
        goto unlock_and_fail;
    }

    if (!select_chmap(ao, &map))
        goto unlock_and_fail;

    if (!(proplist = pa_proplist_new())) {
        MP_ERR(ao, "Failed to allocate proplist\n");
        goto unlock_and_fail;
    }
    (void)pa_proplist_sets(proplist, PA_PROP_MEDIA_ROLE, "video");

    if (!(priv->stream = pa_stream_new_with_proplist(priv->context,
                                                     "audio stream", &ss,
                                                     &map, proplist)))
        goto unlock_and_fail;

    pa_proplist_free(proplist);
    proplist = NULL;

    pa_stream_set_state_callback(priv->stream, stream_state_cb, ao);
    pa_stream_set_write_callback(priv->stream, stream_request_cb, ao);
    pa_stream_set_latency_update_callback(priv->stream,
                                          stream_latency_update_cb, ao);
    /* Target roughly one second of buffered audio; leave the rest to the
     * server's defaults. */
    pa_buffer_attr bufattr = {
        .maxlength = -1,
        .tlength = pa_usec_to_bytes(1000000, &ss),
        .prebuf = -1,
        .minreq = -1,
        .fragsize = -1,
    };
    if (pa_stream_connect_playback(priv->stream, sink, &bufattr,
                                   PA_STREAM_NOT_MONOTONIC, NULL, NULL) < 0)
        goto unlock_and_fail;

    /* Wait until the stream is ready */
    pa_threaded_mainloop_wait(priv->mainloop);

    if (pa_stream_get_state(priv->stream) != PA_STREAM_READY)
        goto unlock_and_fail;

    pa_threaded_mainloop_unlock(priv->mainloop);

    return 0;

unlock_and_fail:
    if (priv->mainloop)
        pa_threaded_mainloop_unlock(priv->mainloop);

fail:
    if (priv->context) {
        /* Stay quiet when this is just a probe hitting a server that
         * refuses connections. */
        if (!(pa_context_errno(priv->context) == PA_ERR_CONNECTIONREFUSED
              && ao->probing))
            GENERIC_ERR_MSG("Init failed");
    }
    if (proplist)
        pa_proplist_free(proplist);
    uninit(ao, true);
    return -1;
}

/* Cork (pause) or uncork the stream.  NOTE(review): waitop() presumably
 * releases the mainloop lock taken here — confirm against its definition. */
static void cork(struct ao *ao, bool pause)
{
    struct priv *priv = ao->priv;
    pa_threaded_mainloop_lock(priv->mainloop);
    priv->retval = 0;
    if (!waitop(priv, pa_stream_cork(priv->stream, pause, success_cb, ao)) ||
        !priv->retval)
        GENERIC_ERR_MSG("pa_stream_cork() failed");
}

// Play the specified data to the pulseaudio server
static int play(struct ao *ao, void **data, int samples, int flags)
{
    struct priv *priv = ao->priv;
    pa_threaded_mainloop_lock(priv->mainloop);
    if (pa_stream_write(priv->stream, data[0], samples * ao->sstride, NULL, 0,
                        PA_SEEK_RELATIVE) < 0) {
        GENERIC_ERR_MSG("pa_stream_write() failed");
        samples = -1;
    }
    if (flags & AOPLAY_FINAL_CHUNK) {
        // Force start in case the stream was too short for prebuf
        pa_operation *op = pa_stream_trigger(priv->stream, NULL, NULL);
        pa_operation_unref(op);
    }
    pa_threaded_mainloop_unlock(priv->mainloop);
    return samples;
}

// Reset the audio stream, i.e. flush the playback buffer on the server side
static void reset(struct ao *ao)
{
    // pa_stream_flush() works badly if not corked
    cork(ao, true);
    struct priv *priv = ao->priv;
    pa_threaded_mainloop_lock(priv->mainloop);
    priv->retval = 0;
    if (!waitop(priv, pa_stream_flush(priv->stream, success_cb, ao)) ||
        !priv->retval)
        GENERIC_ERR_MSG("pa_stream_flush() failed");
    cork(ao, false);
}
/**
 * (Re)configure the PulseAudio playback stream to match the decoder's
 * current input format.  Maps the libav sample format and channel
 * layout onto a pa_sample_spec / pa_channel_map, recreates the stream,
 * connects it, and blocks on the threaded mainloop until it is READY.
 *
 * Returns 0 on success, -1 if the context could not be made ready,
 * 1 if the stream failed or was terminated.
 */
static int
pulseaudio_audio_reconfig(audio_decoder_t *ad)
{
  decoder_t *d = (decoder_t *)ad;
  int i;

  pa_threaded_mainloop_lock(mainloop);

  if(pulseaudio_make_context_ready()) {
    pa_threaded_mainloop_unlock(mainloop);
    return -1;
  }

  /* Tear down any previous stream before creating a new one. */
  if(d->s) {
    pa_stream_disconnect(d->s);
    pa_stream_unref(d->s);
  }

  pa_channel_map map;

  ad->ad_out_sample_rate = ad->ad_in_sample_rate;
  d->ss.rate = ad->ad_in_sample_rate;

  /* Pick the PA sample format; planar inputs are emitted interleaved. */
  switch(ad->ad_in_sample_format) {
  case AV_SAMPLE_FMT_S32:
  case AV_SAMPLE_FMT_S32P:
    ad->ad_out_sample_format = AV_SAMPLE_FMT_S32;
    d->ss.format = PA_SAMPLE_S32NE;
    d->framesize = sizeof(int32_t);
    break;

  case AV_SAMPLE_FMT_S16:
  case AV_SAMPLE_FMT_S16P:
    ad->ad_out_sample_format = AV_SAMPLE_FMT_S16;
    d->ss.format = PA_SAMPLE_S16NE;
    d->framesize = sizeof(int16_t);
    break;

  default:
    ad->ad_out_sample_format = AV_SAMPLE_FMT_FLT;
    d->ss.format = PA_SAMPLE_FLOAT32NE;
    d->framesize = sizeof(float);
    break;
  }

  switch(ad->ad_in_channel_layout) {
  case AV_CH_LAYOUT_MONO:
    d->ss.channels = 1;
    ad->ad_out_channel_layout = AV_CH_LAYOUT_MONO;
    pa_channel_map_init_mono(&map);
    break;

  case AV_CH_LAYOUT_STEREO:
    d->ss.channels = 2;
    ad->ad_out_channel_layout = AV_CH_LAYOUT_STEREO;
    pa_channel_map_init_stereo(&map);
    /* BUGFIX: this case previously fell through into the default case,
     * which re-initialized the channel map just built above. */
    break;

  default:
    /* Build a channel map from the individual layout bits we support. */
    pa_channel_map_init(&map);
    for(i = 0; i < sizeof(av2pa_map) / sizeof(av2pa_map[0]); i++) {
      if(ad->ad_in_channel_layout & av2pa_map[i].avmask) {
        ad->ad_out_channel_layout |= av2pa_map[i].avmask;
        map.map[map.channels++] = av2pa_map[i].papos;
      }
    }
    d->ss.channels = map.channels;
    break;
  }

  /* framesize becomes bytes per frame across all channels. */
  d->framesize *= d->ss.channels;

  /* Size of one server transfer tile, expressed in frames. */
  ad->ad_tile_size = pa_context_get_tile_size(ctx, &d->ss) / d->framesize;

  char buf[100];
  char buf2[PA_CHANNEL_MAP_SNPRINT_MAX];
  TRACE(TRACE_DEBUG, "PA", "Created stream %s [%s] (tilesize=%d)",
        pa_sample_spec_snprint(buf, sizeof(buf), &d->ss),
        pa_channel_map_snprint(buf2, sizeof(buf2), &map),
        ad->ad_tile_size);

#if PA_API_VERSION >= 12
  pa_proplist *pl = pa_proplist_new();
  media_pipe_t *mp = ad->ad_mp;
  /* Advertise a media role so the server can apply routing policy. */
  if(mp->mp_flags & MP_VIDEO)
    pa_proplist_sets(pl, PA_PROP_MEDIA_ROLE, "video");
  else
    pa_proplist_sets(pl, PA_PROP_MEDIA_ROLE, "music");

  d->s = pa_stream_new_with_proplist(ctx, "Showtime playback",
                                     &d->ss, &map, pl);
  /* The stream copies the proplist; release ours. */
  pa_proplist_free(pl);
#else
  /* BUGFIX: the pre-v12 path referenced an undeclared 'ss';
   * the sample spec lives in the decoder struct. */
  d->s = pa_stream_new(ctx, "Showtime playback", &d->ss, &map);
#endif

  int flags = 0;

  pa_stream_set_state_callback(d->s, stream_state_callback, d);
  pa_stream_set_write_callback(d->s, stream_write_callback, d);

  flags |= PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_INTERPOLATE_TIMING;

  pa_stream_connect_playback(d->s, NULL, NULL, flags, NULL, NULL);

  /* Run the mainloop until the stream reaches a terminal state. */
  while(1) {
    switch(pa_stream_get_state(d->s)) {
    case PA_STREAM_UNCONNECTED:
    case PA_STREAM_CREATING:
      pa_threaded_mainloop_wait(mainloop);
      continue;

    case PA_STREAM_READY:
      pa_threaded_mainloop_unlock(mainloop);
      return 0;

    case PA_STREAM_TERMINATED:
    case PA_STREAM_FAILED:
      pa_threaded_mainloop_unlock(mainloop);
      return 1;
    }
  }
}
/* This is called whenever the context status changes */
static void context_state_callback(pa_context *c, void *userdata) {
    pa_assert(c);

    switch (pa_context_get_state(c)) {
        case PA_CONTEXT_CONNECTING:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
            /* Transitional states — nothing to do yet. */
            break;

        case PA_CONTEXT_READY: {
            pa_buffer_attr buffer_attr;

            pa_assert(c);
            pa_assert(!stream);

            if (verbose)
                pa_log(_("Connection established.%s"), CLEAR_LINE);

            /* Create the stream from the globally configured sample spec,
             * channel map and property list. */
            if (!(stream = pa_stream_new_with_proplist(c, NULL, &sample_spec, &channel_map, proplist))) {
                pa_log(_("pa_stream_new() failed: %s"), pa_strerror(pa_context_errno(c)));
                goto fail;
            }

            pa_stream_set_state_callback(stream, stream_state_callback, NULL);
            pa_stream_set_write_callback(stream, stream_write_callback, NULL);
            pa_stream_set_read_callback(stream, stream_read_callback, NULL);
            pa_stream_set_suspended_callback(stream, stream_suspended_callback, NULL);
            pa_stream_set_moved_callback(stream, stream_moved_callback, NULL);
            pa_stream_set_underflow_callback(stream, stream_underflow_callback, NULL);
            pa_stream_set_overflow_callback(stream, stream_overflow_callback, NULL);
            pa_stream_set_started_callback(stream, stream_started_callback, NULL);
            pa_stream_set_event_callback(stream, stream_event_callback, NULL);
            pa_stream_set_buffer_attr_callback(stream, stream_buffer_attr_callback, NULL);

            /* (uint32_t) -1 asks the server to choose a value itself. */
            pa_zero(buffer_attr);
            buffer_attr.maxlength = (uint32_t) -1;
            buffer_attr.prebuf = (uint32_t) -1;

            /* Latency may be given in msec or raw bytes (globals set from
             * the command line); either way request latency adjustment. */
            if (latency_msec > 0) {
                buffer_attr.fragsize = buffer_attr.tlength = pa_usec_to_bytes(latency_msec * PA_USEC_PER_MSEC, &sample_spec);
                flags |= PA_STREAM_ADJUST_LATENCY;
            } else if (latency > 0) {
                buffer_attr.fragsize = buffer_attr.tlength = (uint32_t) latency;
                flags |= PA_STREAM_ADJUST_LATENCY;
            } else
                buffer_attr.fragsize = buffer_attr.tlength = (uint32_t) -1;

            if (process_time_msec > 0) {
                buffer_attr.minreq = pa_usec_to_bytes(process_time_msec * PA_USEC_PER_MSEC, &sample_spec);
            } else if (process_time > 0)
                buffer_attr.minreq = (uint32_t) process_time;
            else
                buffer_attr.minreq = (uint32_t) -1;

            if (mode == PLAYBACK) {
                pa_cvolume cv;
                /* Only pass an initial volume when the user set one. */
                if (pa_stream_connect_playback(stream, device, &buffer_attr, flags, volume_is_set ? pa_cvolume_set(&cv, sample_spec.channels, volume) : NULL, NULL) < 0) {
                    pa_log(_("pa_stream_connect_playback() failed: %s"), pa_strerror(pa_context_errno(c)));
                    goto fail;
                }
            } else {
                if (pa_stream_connect_record(stream, device, latency > 0 ? &buffer_attr : NULL, flags) < 0) {
                    pa_log(_("pa_stream_connect_record() failed: %s"), pa_strerror(pa_context_errno(c)));
                    goto fail;
                }
            }

            break;
        }

        case PA_CONTEXT_TERMINATED:
            /* Clean server-side disconnect: exit successfully. */
            quit(0);
            break;

        case PA_CONTEXT_FAILED:
        default:
            pa_log(_("Connection failure: %s"), pa_strerror(pa_context_errno(c)));
            goto fail;
    }

    return;

fail:
    quit(1);
}
bool AudioOutputPulseAudio::ConnectPlaybackStream(void) { QString fn_log_tag = "ConnectPlaybackStream, "; pa_proplist *proplist = pa_proplist_new(); if (!proplist) { VBERROR(fn_log_tag + QString("failed to create new proplist")); return false; } pa_proplist_sets(proplist, PA_PROP_MEDIA_ROLE, "video"); pstream = pa_stream_new_with_proplist(pcontext, "MythTV playback", &sample_spec, &channel_map, proplist); if (!pstream) { VBERROR("failed to create new playback stream"); return false; } pa_stream_set_state_callback(pstream, StreamStateCallback, this); pa_stream_set_write_callback(pstream, WriteCallback, this); pa_stream_set_overflow_callback(pstream, BufferFlowCallback, (char*)"over"); pa_stream_set_underflow_callback(pstream, BufferFlowCallback, (char*)"under"); if (set_initial_vol) { int volume = gCoreContext->GetNumSetting("MasterMixerVolume", 80); pa_cvolume_set(&volume_control, channels, (float)volume * (float)PA_VOLUME_NORM / 100.0f); } else pa_cvolume_reset(&volume_control, channels); fragment_size = (samplerate * 25 * output_bytes_per_frame) / 1000; buffer_settings.maxlength = (uint32_t)-1; buffer_settings.tlength = fragment_size * 4; buffer_settings.prebuf = (uint32_t)-1; buffer_settings.minreq = (uint32_t)-1; int flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_NO_REMIX_CHANNELS; pa_stream_connect_playback(pstream, NULL, &buffer_settings, (pa_stream_flags_t)flags, NULL, NULL); pa_context_state_t cstate; pa_stream_state_t sstate; bool connected = false, failed = false; while (!(connected || failed)) { switch (cstate = pa_context_get_state(pcontext)) { case PA_CONTEXT_FAILED: case PA_CONTEXT_TERMINATED: VBERROR(QString("context is stuffed, %1") .arg(pa_strerror(pa_context_errno(pcontext)))); failed = true; break; default: switch (sstate = pa_stream_get_state(pstream)) { case PA_STREAM_READY: connected = true; break; case PA_STREAM_FAILED: case PA_STREAM_TERMINATED: VBERROR(QString("stream failed or was 
terminated, " "context state %1, stream state %2") .arg(cstate).arg(sstate)); failed = true; break; default: pa_threaded_mainloop_wait(mainloop); break; } } } const pa_buffer_attr *buf_attr = pa_stream_get_buffer_attr(pstream); fragment_size = buf_attr->tlength >> 2; soundcard_buffer_size = buf_attr->maxlength; VBAUDIO(QString("fragment size %1, soundcard buffer size %2") .arg(fragment_size).arg(soundcard_buffer_size)); return (connected && !failed); }
/*
 * Construct an audio stream (playback, ringtone or capture) on an
 * already-connected context.  Creates the pa_stream, connects it with
 * ~80 ms latency targets while holding the mainloop lock, then installs
 * the state/moved callbacks.  Throws std::runtime_error if the stream
 * cannot be created.
 */
AudioStream::AudioStream(pa_context *c, pa_threaded_mainloop *m,
                         const char *desc, int type, unsigned samplrate,
                         const PaDeviceInfos* infos, bool ec)
    : audiostream_(0), mainloop_(m)
{
    const pa_channel_map channel_map = infos->channel_map;

    pa_sample_spec sample_spec = {
        PA_SAMPLE_S16LE, // PA_SAMPLE_FLOAT32LE,
        samplrate,
        channel_map.channels
    };

    RING_DBG("%s: trying to create stream with device %s (%dHz, %d channels)",
             desc, infos->name.c_str(), samplrate, channel_map.channels);

    assert(pa_sample_spec_valid(&sample_spec));
    assert(pa_channel_map_valid(&channel_map));

    // RAII proplist; the deleter reference keeps it freed on all paths.
    std::unique_ptr<pa_proplist, decltype(pa_proplist_free)&>
        pl (pa_proplist_new(), pa_proplist_free);
    pa_proplist_sets(pl.get(), PA_PROP_FILTER_WANT, "echo-cancel");

    // The proplist (and thus the echo-cancel filter request) is only
    // attached when 'ec' is set.
    audiostream_ = pa_stream_new_with_proplist(c, desc, &sample_spec,
                                               &channel_map,
                                               ec ? pl.get() : nullptr);
    if (!audiostream_) {
        RING_ERR("%s: pa_stream_new() failed : %s" ,
                 desc, pa_strerror(pa_context_errno(c)));
        throw std::runtime_error("Could not create stream\n");
    }

    // Roughly 80 ms target/fragment latency, 160 ms maximum buffer;
    // prebuf 0 starts playback immediately, minreq left to the server.
    pa_buffer_attr attributes;
    attributes.maxlength = pa_usec_to_bytes(160 * PA_USEC_PER_MSEC, &sample_spec);
    attributes.tlength = pa_usec_to_bytes(80 * PA_USEC_PER_MSEC, &sample_spec);
    attributes.prebuf = 0;
    attributes.fragsize = pa_usec_to_bytes(80 * PA_USEC_PER_MSEC, &sample_spec);
    attributes.minreq = (uint32_t) -1;

    {
        // Hold the threaded-mainloop lock for the connect call.
        PulseMainLoopLock lock(mainloop_);
        const pa_stream_flags_t flags = static_cast<pa_stream_flags_t>(
            PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE);

        if (type == PLAYBACK_STREAM || type == RINGTONE_STREAM) {
            pa_stream_connect_playback(audiostream_,
                                       infos->name.empty() ? NULL : infos->name.c_str(),
                                       &attributes,
                                       flags,
                                       NULL, NULL);
        } else if (type == CAPTURE_STREAM) {
            pa_stream_connect_record(audiostream_,
                                     infos->name.empty() ? NULL : infos->name.c_str(),
                                     &attributes,
                                     flags);
        }
    }

    // Forward C callbacks to the member functions via the userdata pointer.
    pa_stream_set_state_callback(audiostream_,
        [](pa_stream* s, void* user_data){
            static_cast<AudioStream*>(user_data)->stateChanged(s);
        }, this);
    pa_stream_set_moved_callback(audiostream_,
        [](pa_stream* s, void* user_data){
            static_cast<AudioStream*>(user_data)->moved(s);
        }, this);
}
pa_simple* pa_simple_new_proplist( const char *server, const char *name, pa_stream_direction_t dir, const char *dev, const char *stream_name, const pa_sample_spec *ss, const pa_channel_map *map, const pa_buffer_attr *attr, pa_proplist *proplist, int *rerror) { pa_simple *p; int error = PA_ERR_INTERNAL, r; CHECK_VALIDITY_RETURN_ANY(rerror, !server || *server, PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, dir == PA_STREAM_PLAYBACK || dir == PA_STREAM_RECORD, PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, !dev || *dev, PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, ss && pa_sample_spec_valid(ss), PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, !map || (pa_channel_map_valid(map) && map->channels == ss->channels), PA_ERR_INVALID, NULL) p = pa_xnew0(pa_simple, 1); p->direction = dir; if (!(p->mainloop = pa_threaded_mainloop_new())) goto fail; if (!(p->context = pa_context_new(pa_threaded_mainloop_get_api(p->mainloop), name))) goto fail; pa_context_set_state_callback(p->context, context_state_cb, p); if (pa_context_connect(p->context, server, 0, NULL) < 0) { error = pa_context_errno(p->context); goto fail; } pa_threaded_mainloop_lock(p->mainloop); if (pa_threaded_mainloop_start(p->mainloop) < 0) goto unlock_and_fail; for (;;) { pa_context_state_t state; state = pa_context_get_state(p->context); if (state == PA_CONTEXT_READY) break; if (!PA_CONTEXT_IS_GOOD(state)) { error = pa_context_errno(p->context); goto unlock_and_fail; } /* Wait until the context is ready */ pa_threaded_mainloop_wait(p->mainloop); } if (!(p->stream = pa_stream_new_with_proplist(p->context, stream_name, ss, map, proplist))) { error = pa_context_errno(p->context); goto unlock_and_fail; } pa_stream_set_state_callback(p->stream, stream_state_cb, p); pa_stream_set_read_callback(p->stream, stream_request_cb, p); pa_stream_set_write_callback(p->stream, stream_request_cb, p); pa_stream_set_latency_update_callback(p->stream, stream_latency_update_cb, p); if (dir == 
PA_STREAM_PLAYBACK) r = pa_stream_connect_playback(p->stream, dev, attr, PA_STREAM_INTERPOLATE_TIMING |PA_STREAM_ADJUST_LATENCY |PA_STREAM_AUTO_TIMING_UPDATE, NULL, NULL); else r = pa_stream_connect_record(p->stream, dev, attr, PA_STREAM_INTERPOLATE_TIMING |PA_STREAM_ADJUST_LATENCY |PA_STREAM_AUTO_TIMING_UPDATE |PA_STREAM_START_CORKED); if (r < 0) { error = pa_context_errno(p->context); goto unlock_and_fail; } for (;;) { pa_stream_state_t state; state = pa_stream_get_state(p->stream); if (state == PA_STREAM_READY) break; if (!PA_STREAM_IS_GOOD(state)) { error = pa_context_errno(p->context); goto unlock_and_fail; } /* Wait until the stream is ready */ pa_threaded_mainloop_wait(p->mainloop); } pa_threaded_mainloop_unlock(p->mainloop); return p; unlock_and_fail: pa_threaded_mainloop_unlock(p->mainloop); fail: if (rerror) *rerror = error; pa_simple_free(p); return NULL; }