/*
 * Initialize the output files and the PulseAudio connection, calibrate the
 * noise threshold by sampling ambient audio for QUIET_TIME seconds, then run
 * the recording mainloop until it quits.
 *
 * Returns 0 on clean shutdown, a negative value on failure.  In every case
 * stop_recording() is invoked before returning.
 */
int start_recording(recorder_context_t *rctx)
{
	int retval = 0;
	time_t current_time;

	Log(LOG_INFO, "Starting recorder.\n");

	if ((retval = init_filenames(rctx)) < 0) {
		Log(LOG_ERR, "Failed in the initialization of the recording file.\n");
		goto exit;
	}
	Log(LOG_INFO, "Filenames created.\n");

	if ((retval = init_pa(rctx)) < 0) {
		Log(LOG_ERR, "Failed in the initializaion of pulse audio.\n");
		goto exit;
	}
	Log(LOG_INFO, "PulseAudio connected.\n");

	Log(LOG_INFO, "Calibrating threshold.\n");
	printf("***** ATTENTION *****\n");
	printf("Keep quiet for the next %d seconds please.\n", QUIET_TIME);

	/* During calibration the read callback measures ambient power. */
	pa_stream_set_read_callback(rctx->recording_stream, detect_threshold_cb, rctx);

	rctx->timestamp = time(NULL);
	current_time = rctx->timestamp;

#ifdef DEBUG
	threshold_file = fopen("/tmp/threshold.pcm", "wb");
#endif

	while (difftime(current_time, rctx->timestamp) < QUIET_TIME) {
		/*
		 * BUG FIX: pa_mainloop_iterate() reports errors through its
		 * return value; its third argument is only written when the
		 * mainloop is quit.  The original code inspected only the
		 * latter, so iteration errors went undetected here.
		 */
		if (pa_mainloop_iterate(rctx->pa_ml, 0, &retval) < 0 || retval < 0) {
			if (retval >= 0)
				retval = -1;
			Log(LOG_ERR, "There was a problem calculating the threshold power.\n");
			goto exit;
		}
		current_time = time(NULL);
	}
	Log(LOG_DEBUG, "Threshold: %f\n", rctx->threshold);

	Log(LOG_INFO, "Entering into the mainloop.\n");
	printf("\nNow you can start talking.\n");
	rctx->timestamp = time(NULL);
	/* Switch to the real recording callback and block in the mainloop. */
	pa_stream_set_read_callback(rctx->recording_stream, stream_request_cb, rctx);
	pa_mainloop_run(rctx->pa_ml, &retval);

exit:
	stop_recording(rctx, false);
	return retval;
}
void QPulseAudioInput::close() { if (!m_opened) return; m_timer->stop(); QPulseAudioEngine *pulseEngine = QPulseAudioEngine::instance(); if (m_stream) { pulseEngine->lock(); pa_stream_set_state_callback(m_stream, 0, 0); pa_stream_set_read_callback(m_stream, 0, 0); pa_stream_set_underflow_callback(m_stream, 0, 0); pa_stream_set_overflow_callback(m_stream, 0, 0); pa_stream_disconnect(m_stream); pa_stream_unref(m_stream); m_stream = 0; pulseEngine->unlock(); } disconnect(pulseEngine, &QPulseAudioEngine::contextFailed, this, &QPulseAudioInput::onPulseContextFailed); if (!m_pullMode && m_audioSource) { delete m_audioSource; m_audioSource = 0; } m_opened = false; }
/** * Start recording * * We request the default format used by pulse here because the data will be * converted and possibly re-sampled by obs anyway. * * For now we request a buffer length of 25ms although pulse seems to ignore * this setting for monitor streams. For "real" input streams this should work * fine though. */ static int_fast32_t pulse_start_recording(struct pulse_data *data) { if (pulse_get_server_info(pulse_server_info, (void *) data) < 0) { blog(LOG_ERROR, "Unable to get server info !"); return -1; } if (pulse_get_source_info(pulse_source_info, data->device, (void *) data) < 0) { blog(LOG_ERROR, "Unable to get source info !"); return -1; } pa_sample_spec spec; spec.format = data->format; spec.rate = data->samples_per_sec; spec.channels = data->channels; if (!pa_sample_spec_valid(&spec)) { blog(LOG_ERROR, "Sample spec is not valid"); return -1; } data->speakers = pulse_channels_to_obs_speakers(spec.channels); data->bytes_per_frame = pa_frame_size(&spec); data->stream = pulse_stream_new(obs_source_get_name(data->source), &spec, NULL); if (!data->stream) { blog(LOG_ERROR, "Unable to create stream"); return -1; } pulse_lock(); pa_stream_set_read_callback(data->stream, pulse_stream_read, (void *) data); pulse_unlock(); pa_buffer_attr attr; attr.fragsize = pa_usec_to_bytes(25000, &spec); attr.maxlength = (uint32_t) -1; attr.minreq = (uint32_t) -1; attr.prebuf = (uint32_t) -1; attr.tlength = (uint32_t) -1; pa_stream_flags_t flags = PA_STREAM_ADJUST_LATENCY; pulse_lock(); int_fast32_t ret = pa_stream_connect_record(data->stream, data->device, &attr, flags); pulse_unlock(); if (ret < 0) { pulse_stop_recording(data); blog(LOG_ERROR, "Unable to connect to stream"); return -1; } blog(LOG_INFO, "Started recording from '%s'", data->device); return 0; }
/*
 * Create a playback or record stream on g's context and connect it.
 * On failure, NULL is returned and *rerror receives the context errno.
 */
static pa_stream *qpa_simple_new (
        paaudio *g,
        const char *name,
        pa_stream_direction_t dir,
        const char *dev,
        const pa_sample_spec *ss,
        const pa_channel_map *map,
        const pa_buffer_attr *attr,
        int *rerror)
{
    pa_stream *stream = NULL;
    pa_stream_flags_t flags;
    int r;

    pa_threaded_mainloop_lock (g->mainloop);

    stream = pa_stream_new (g->context, name, ss, map);
    if (!stream) {
        goto fail;
    }

    pa_stream_set_state_callback (stream, stream_state_cb, g);
    pa_stream_set_read_callback (stream, stream_request_cb, g);
    pa_stream_set_write_callback (stream, stream_request_cb, g);

    /* same flag set for both directions */
    flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE;
#ifdef PA_STREAM_ADJUST_LATENCY
    flags |= PA_STREAM_ADJUST_LATENCY;
#endif

    if (dir == PA_STREAM_PLAYBACK) {
        r = pa_stream_connect_playback (stream, dev, attr, flags, NULL, NULL);
    } else {
        r = pa_stream_connect_record (stream, dev, attr, flags);
    }

    if (r < 0) {
        goto fail;
    }

    pa_threaded_mainloop_unlock (g->mainloop);
    return stream;

fail:
    pa_threaded_mainloop_unlock (g->mainloop);

    if (stream) {
        pa_stream_unref (stream);
    }

    *rerror = pa_context_errno (g->context);
    return NULL;
}
/**
 * Pulseaudio context state callback
 *
 * Drives the lifecycle of the global record stream 'stream_in': once the
 * context reports PA_CONTEXT_READY the stream is created, its callbacks are
 * wired up and it is connected for recording.  A terminated context exits
 * cleanly; any failure exits with status 1 via the 'fail' path.
 *
 * @param c the pulseaudio context (asserted non-NULL)
 * @param userdata unused
 */
static void context_state_callback (pa_context * c, void *userdata)
{
  GNUNET_assert (c);
  switch (pa_context_get_state (c))
  {
  /* transient states while the connection is being established: no-op */
  case PA_CONTEXT_CONNECTING:
  case PA_CONTEXT_AUTHORIZING:
  case PA_CONTEXT_SETTING_NAME:
    break;
  case PA_CONTEXT_READY:
  {
    int r;
    pa_buffer_attr na;

    /* READY is reached only once, so the stream must not exist yet */
    GNUNET_assert (!stream_in);
    GNUNET_log (GNUNET_ERROR_TYPE_INFO,
                _("Connection established.\n"));
    if (! (stream_in =
             pa_stream_new (c, "GNUNET_VoIP recorder", &sample_spec, NULL)))
    {
      GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
                  _("pa_stream_new() failed: %s\n"),
                  pa_strerror (pa_context_errno (c)));
      goto fail;
    }
    pa_stream_set_state_callback (stream_in, &stream_state_callback, NULL);
    pa_stream_set_read_callback (stream_in, &stream_read_callback, NULL);
    /* zero the attr struct, then request unlimited buffering with
       delivery in pcm_length-sized fragments */
    memset (&na, 0, sizeof (na));
    na.maxlength = UINT32_MAX;
    na.fragsize = pcm_length;
    if ((r = pa_stream_connect_record (stream_in, NULL, &na,
                                       PA_STREAM_ADJUST_LATENCY)) < 0)
    {
      GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
                  _("pa_stream_connect_record() failed: %s\n"),
                  pa_strerror (pa_context_errno (c)));
      goto fail;
    }
    break;
  }
  case PA_CONTEXT_TERMINATED:
    /* server closed the connection cleanly */
    quit (0);
    break;
  case PA_CONTEXT_FAILED:
  default:
    GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
                _("Connection failure: %s\n"),
                pa_strerror (pa_context_errno (c)));
    goto fail;
  }
  return;
fail:
  quit (1);
}
/*
 * JNI entry point: install a Java object as the read callback of the
 * native stream wrapped by jstream.  The Java callback is packed into a
 * jni_pa_cb_info_t so the C-level read_cb can dispatch back into the JVM.
 */
JNIEXPORT void JNICALL Java_com_harrcharr_pulse_Stream_setReadCallback(
        JNIEnv *jenv, jobject jstream, jobject jcb)
{
    jni_pa_cb_info_t *cbinfo = new_stream_cbinfo(jenv, jstream, jcb, NULL);
    pa_stream *native_stream = get_stream_ptr(jenv, jstream);

    pa_stream_set_read_callback(native_stream, read_cb, cbinfo);
}
/* * start recording */ static int_fast32_t pulse_start_recording(struct pulse_data *data) { if (pulse_get_server_info(pulse_server_info, (void *) data) < 0) { blog(LOG_ERROR, "pulse-input: Unable to get server info !"); return -1; } pa_sample_spec spec; spec.format = data->format; spec.rate = data->samples_per_sec; spec.channels = data->channels; if (!pa_sample_spec_valid(&spec)) { blog(LOG_ERROR, "pulse-input: Sample spec is not valid"); return -1; } data->bytes_per_frame = pa_frame_size(&spec); blog(LOG_DEBUG, "pulse-input: %u bytes per frame", (unsigned int) data->bytes_per_frame); data->stream = pulse_stream_new(obs_source_getname(data->source), &spec, NULL); if (!data->stream) { blog(LOG_ERROR, "pulse-input: Unable to create stream"); return -1; } pulse_lock(); pa_stream_set_read_callback(data->stream, pulse_stream_read, (void *) data); pulse_unlock(); pa_buffer_attr attr; attr.fragsize = get_buffer_size(data, 250); attr.maxlength = (uint32_t) -1; attr.minreq = (uint32_t) -1; attr.prebuf = (uint32_t) -1; attr.tlength = (uint32_t) -1; pa_stream_flags_t flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_ADJUST_LATENCY; pulse_lock(); int_fast32_t ret = pa_stream_connect_record(data->stream, data->device, &attr, flags); pulse_unlock(); if (ret < 0) { blog(LOG_ERROR, "pulse-input: Unable to connect to stream"); return -1; } blog(LOG_DEBUG, "pulse-input: Recording started"); return 0; }
/*
 * JNI entry point: install (or clear) the read callback of the native
 * pa_stream identified by the handle s.  The Java callback object is held
 * only via a weak global reference so the stream does not keep it alive;
 * passing a null callback clears the native callback entirely.
 */
JNIEXPORT void JNICALL
Java_org_jitsi_impl_neomedia_pulseaudio_PA_stream_1set_1read_1callback
    (JNIEnv *env, jclass clazz, jlong s, jobject cb)
{
    jweak weakCb = NULL;

    if (cb)
        weakCb = (*env)->NewWeakGlobalRef(env, cb);

    if (weakCb)
        pa_stream_set_read_callback(
                (pa_stream *) (intptr_t) s,
                PulseAudio_streamRequestCallback,
                (void *) weakCb);
    else
        pa_stream_set_read_callback(
                (pa_stream *) (intptr_t) s,
                NULL,
                NULL);
}
/*
 * (Re)connect the global peak-monitoring stream on the given context.
 *
 * Requires server protocol version >= 13.  Any existing stream is torn
 * down first.  Returns 0 on success, -1 for an unsupported server, -2 if
 * the stream could not be created and -3 if connecting it failed.
 */
static int m_pa_stream_connect(pa_context *pa_ctx)
{
    if (pa_context_get_server_protocol_version (pa_ctx) < 13) {
        return -1;
    }

    printf("server version: %d\n", pa_context_get_server_protocol_version(pa_ctx));

    /* drop any previously connected monitor stream */
    if (s) {
        pa_stream_disconnect(s);
        pa_stream_unref(s);
    }

    pa_proplist *proplist;
    pa_buffer_attr attr;
    pa_sample_spec ss;
    int res;

    /* one float peak sample per "frame", 25 times a second */
    ss.channels = 1;
    ss.format = PA_SAMPLE_FLOAT32;
    ss.rate = 25;

    /* deliver one float at a time */
    memset(&attr, 0, sizeof(attr));
    attr.fragsize = sizeof(float);
    attr.maxlength = (uint32_t) -1;

    proplist = pa_proplist_new ();
    pa_proplist_sets (proplist, PA_PROP_APPLICATION_ID, "Deepin Sound Settings");

    /* create new stream */
    if (!(s = pa_stream_new_with_proplist(pa_ctx, "Deepin Sound Settings", &ss, NULL, proplist))) {
        fprintf(stderr, "pa_stream_new error\n");
        /* BUG FIX: the proplist was leaked on this error path. */
        pa_proplist_free(proplist);
        return -2;
    }
    pa_proplist_free(proplist);

    pa_stream_set_read_callback(s, on_monitor_read_callback, NULL);
    pa_stream_set_suspended_callback(s, on_monitor_suspended_callback, NULL);

    res = pa_stream_connect_record(s, NULL, &attr,
                                   (pa_stream_flags_t) (PA_STREAM_DONT_MOVE
                                                        |PA_STREAM_PEAK_DETECT
                                                        |PA_STREAM_ADJUST_LATENCY));
    if (res < 0) {
        fprintf(stderr, "Failed to connect monitoring stream\n");
        return -3;
    }
    return 0;
}
// Re-establish the capture stream, either on an explicitly requested source
// (pos) or on whatever scanForPlaybackMonitor() finds.  Any existing READY
// stream is disconnected first, a fresh stream is created, and its
// connection is driven according to the new stream's state.
void QPulseAudioThread::reconnect(SourceContainer::const_iterator pos = s_sourceList.end())
{
	if (s_sourceList.empty())
		return;

	// pick the source to connect to: the caller's choice if given,
	// otherwise autodetect; fall back to the first known source
	if (pos != s_sourceList.end()) {
		s_sourcePosition = pos;
		qDebug() << "reconnecting with" << *pos;
	} else
		s_sourcePosition = scanForPlaybackMonitor();
	if (s_sourcePosition == s_sourceList.end()) {
		s_sourcePosition = s_sourceList.begin();
	}

	if (stream && (pa_stream_get_state(stream) == PA_STREAM_READY)) {
		//qDebug() << "disconnect";
		pa_stream_disconnect ( stream );
		// NOTE(review): the old stream object is not unreffed here
		// (the unref below is commented out) — confirm the state
		// callback releases it, otherwise this leaks a pa_stream.
		// pa_stream_unref(stream);
		//qDebug() << "* return *";
	}

	if ( ! ( stream = pa_stream_new ( context, stream_name, &sample_spec,
	                                  channel_map_set ? &channel_map : NULL ) ) ) {
		fprintf ( stderr, "pa_stream_new() failed: %s\n",
		          pa_strerror ( pa_context_errno ( context ) ) );
		return;
	}

	pa_stream_set_state_callback ( stream, stream_state_callback, &s_sourceList );
	pa_stream_set_read_callback ( stream, stream_read_callback, &s_sourceList );
	pa_stream_set_moved_callback(stream, stream_moved_callback, &s_sourceList );

	// a newly created stream should be UNCONNECTED; the other cases only
	// report what happened and wait for the state callback to progress
	switch (pa_stream_get_state(stream)) {
		case PA_STREAM_UNCONNECTED:// The stream is not yet connected to any sink or source.
			qDebug() << "unconnected: connecting...";
			connectHelper(s_sourcePosition);
			break;
		case PA_STREAM_CREATING ://The stream is being created.
			break;
		case PA_STREAM_READY :// The stream is established, you may pass audio data to it now.
			qDebug() << "stream is still ready, waiting for callback...";
			break;
		case PA_STREAM_FAILED :// An error occured that made the stream invalid.
			qDebug() << "stream is now invalid. great.";
			break;
		case PA_STREAM_TERMINATED:// The stream has been terminated cleanly.
			qDebug() << "terminated...";
			break;
	}
}
// Create the PulseAudio record stream for an input stream: build the sample
// spec and channel map from the public instream fields, create the stream
// under the mainloop lock and install its state/read callbacks.  The stream
// is not connected here; only the desired buffer attributes are prepared.
// Returns 0 on success or a SoundIoError* code.
static int instream_open_pa(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) {
    struct SoundIoInStreamPulseAudio *ispa = &is->backend_data.pulseaudio;
    struct SoundIoInStream *instream = &is->pub;

    if ((unsigned)instream->layout.channel_count > PA_CHANNELS_MAX)
        return SoundIoErrorIncompatibleBackend;

    // give unnamed streams a recognizable default
    if (!instream->name)
        instream->name = "SoundIoInStream";

    struct SoundIoPulseAudio *sipa = &si->backend_data.pulseaudio;
    SOUNDIO_ATOMIC_STORE(ispa->stream_ready, false);

    pa_threaded_mainloop_lock(sipa->main_loop);

    pa_sample_spec sample_spec;
    sample_spec.format = to_pulseaudio_format(instream->format);
    sample_spec.rate = instream->sample_rate;
    sample_spec.channels = instream->layout.channel_count;

    pa_channel_map channel_map = to_pulseaudio_channel_map(&instream->layout);

    ispa->stream = pa_stream_new(sipa->pulse_context, instream->name,
            &sample_spec, &channel_map);
    if (!ispa->stream) {
        // unlock before destroy: instream_destroy_pa takes the lock itself
        pa_threaded_mainloop_unlock(sipa->main_loop);
        instream_destroy_pa(si, is);
        return SoundIoErrorNoMem;
    }

    pa_stream *stream = ispa->stream;
    pa_stream_set_state_callback(stream, recording_stream_state_callback, is);
    pa_stream_set_read_callback(stream, recording_stream_read_callback, is);

    // default: let the server choose; only fragsize is narrowed below
    ispa->buffer_attr.maxlength = UINT32_MAX;
    ispa->buffer_attr.tlength = UINT32_MAX;
    ispa->buffer_attr.prebuf = 0;
    ispa->buffer_attr.minreq = UINT32_MAX;
    ispa->buffer_attr.fragsize = UINT32_MAX;

    if (instream->software_latency > 0.0) {
        // translate the requested latency (seconds) into a whole number
        // of frames, expressed in bytes
        int bytes_per_second = instream->bytes_per_frame * instream->sample_rate;
        int buffer_length = instream->bytes_per_frame *
            ceil_dbl_to_int(instream->software_latency * bytes_per_second / (double)instream->bytes_per_frame);
        ispa->buffer_attr.fragsize = buffer_length;
    }

    pa_threaded_mainloop_unlock(sipa->main_loop);

    return 0;
}
// Disconnect and release the underlying pa_stream while holding the
// mainloop lock, detaching every callback first so nothing fires into a
// destroyed object.
AudioStream::~AudioStream()
{
    PulseMainLoopLock lock(mainloop_);

    pa_stream_disconnect(audiostream_);

    // make sure we don't get any further callback
    pa_stream_set_state_callback(audiostream_, nullptr, nullptr);
    pa_stream_set_write_callback(audiostream_, nullptr, nullptr);
    pa_stream_set_read_callback(audiostream_, nullptr, nullptr);
    pa_stream_set_moved_callback(audiostream_, nullptr, nullptr);
    pa_stream_set_underflow_callback(audiostream_, nullptr, nullptr);
    pa_stream_set_overflow_callback(audiostream_, nullptr, nullptr);

    pa_stream_unref(audiostream_);
}
static void instream_destroy_pa(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) { struct SoundIoInStreamPulseAudio *ispa = &is->backend_data.pulseaudio; struct SoundIoPulseAudio *sipa = &si->backend_data.pulseaudio; pa_stream *stream = ispa->stream; if (stream) { pa_threaded_mainloop_lock(sipa->main_loop); pa_stream_set_state_callback(stream, NULL, NULL); pa_stream_set_read_callback(stream, NULL, NULL); pa_stream_disconnect(stream); pa_stream_unref(stream); pa_threaded_mainloop_unlock(sipa->main_loop); ispa->stream = NULL; } }
/*
 * Sink-info enumeration callback: for each sink reported by the server,
 * create a record stream on the sink's monitor source and start streaming
 * audio into the client.
 */
static void __context_get_sink_info_callback(pa_context* context,
        const pa_sink_info* info, int is_last, void* data) {

    guac_client* client = (guac_client*) data;
    pa_stream* stream;
    pa_sample_spec spec;
    pa_buffer_attr attr;

    /* Stop if end of list reached */
    if (is_last)
        return;

    guac_client_log_info(client, "Starting streaming from \"%s\"",
            info->description);

    /* Set format */
    spec.format = PA_SAMPLE_S16LE;
    spec.rate = GUAC_VNC_AUDIO_RATE;
    spec.channels = GUAC_VNC_AUDIO_CHANNELS;

    attr.maxlength = -1;
    attr.fragsize = GUAC_VNC_AUDIO_FRAGMENT_SIZE;

    /* Create stream */
    stream = pa_stream_new(context, "Guacamole Audio", &spec, NULL);

    /* BUG FIX: pa_stream_new() may return NULL (e.g. invalid spec or
     * dead context); the callback setters below would dereference it. */
    if (stream == NULL) {
        guac_client_log_info(client, "Unable to create PulseAudio stream");
        return;
    }

    /* Set stream callbacks */
    pa_stream_set_state_callback(stream, __stream_state_callback, client);
    pa_stream_set_read_callback(stream, __stream_read_callback, client);

    /* Start stream */
    pa_stream_connect_record(stream, info->monitor_source_name, &attr,
            PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND | PA_STREAM_ADJUST_LATENCY);

}
/* * Iterate the main loop while recording is on. * This function runs under it's own thread called by audio_pulse_start * args: * data - pointer to user data (audio context) * * asserts: * data is not null * * returns: pointer to error code */ static void *pulse_read_audio(void *data) { audio_context_t *audio_ctx = (audio_context_t *) data; /*assertions*/ assert(audio_ctx != NULL); if(verbosity > 0) printf("AUDIO: (pulseaudio) read thread started\n"); pa_mainloop *pa_ml; pa_mainloop_api *pa_mlapi; pa_buffer_attr bufattr; pa_sample_spec ss; pa_stream_flags_t flags = 0; int r; int pa_ready = 0; /* Create a mainloop API and connection to the default server */ pa_ml = pa_mainloop_new(); pa_mlapi = pa_mainloop_get_api(pa_ml); pa_ctx = pa_context_new(pa_mlapi, "guvcview Pulse API"); if(pa_context_connect(pa_ctx, NULL, 0, NULL) < 0) { fprintf(stderr,"AUDIO: PULSE - unable to connect to server: pa_context_connect failed\n"); finish(pa_ctx, pa_ml); return ((void *) -1); } /* * This function defines a callback so the server will tell us it's state. * Our callback will wait for the state to be ready. The callback will * modify the variable to 1 so we know when we have a connection and it's * ready. 
* If there's an error, the callback will set pa_ready to 2 */ pa_context_set_state_callback(pa_ctx, pa_state_cb, &pa_ready); /* * This function defines a time event callback (called every TIME_EVENT_USEC) */ //pa_context_rttime_new(pa_ctx, pa_rtclock_now() + TIME_EVENT_USEC, time_event_callback, NULL); /* * We can't do anything until PA is ready, so just iterate the mainloop * and continue */ while (pa_ready == 0) { pa_mainloop_iterate(pa_ml, 1, NULL); } if (pa_ready == 2) { finish(pa_ctx, pa_ml); return ((void *) -1); } /* set the sample spec (frame rate, channels and format) */ ss.rate = audio_ctx->samprate; ss.channels = audio_ctx->channels; ss.format = PA_SAMPLE_FLOAT32LE; /*for PCM -> PA_SAMPLE_S16LE*/ recordstream = pa_stream_new(pa_ctx, "Record", &ss, NULL); if (!recordstream) fprintf(stderr, "AUDIO: (pulseaudio) pa_stream_new failed (chan:%d rate:%d)\n", ss.channels, ss.rate); /* define the callbacks */ pa_stream_set_read_callback(recordstream, stream_request_cb, (void *) audio_ctx); // Set properties of the record buffer pa_zero(bufattr); /* optimal value for all is (uint32_t)-1 ~= 2 sec */ bufattr.maxlength = (uint32_t) -1; bufattr.prebuf = (uint32_t) -1; bufattr.minreq = (uint32_t) -1; if (audio_ctx->latency > 0) { bufattr.fragsize = bufattr.tlength = pa_usec_to_bytes((audio_ctx->latency * 1000) * PA_USEC_PER_MSEC, &ss); flags |= PA_STREAM_ADJUST_LATENCY; } else bufattr.fragsize = bufattr.tlength = (uint32_t) -1; flags |= PA_STREAM_INTERPOLATE_TIMING; flags |= PA_STREAM_AUTO_TIMING_UPDATE; char * dev = audio_ctx->list_devices[audio_ctx->device].name; if(verbosity > 0) printf("AUDIO: (pulseaudio) connecting to device %s\n\t (channels %d rate %d)\n", dev, ss.channels, ss.rate); r = pa_stream_connect_record(recordstream, dev, &bufattr, flags); if (r < 0) { fprintf(stderr, "AUDIO: (pulseaudio) skip latency adjustment\n"); /* * Old pulse audio servers don't like the ADJUST_LATENCY flag, * so retry without that */ r = pa_stream_connect_record(recordstream, 
dev, &bufattr, PA_STREAM_INTERPOLATE_TIMING| PA_STREAM_AUTO_TIMING_UPDATE); } if (r < 0) { fprintf(stderr, "AUDIO: (pulseaudio) pa_stream_connect_record failed\n"); finish(pa_ctx, pa_ml); return ((void *) -1); } get_latency(recordstream); /* * Iterate the main loop while streaming. The second argument is whether * or not the iteration should block until something is ready to be * done. Set it to zero for non-blocking. */ while (audio_ctx->stream_flag == AUDIO_STRM_ON) { pa_mainloop_iterate(pa_ml, 1, NULL); } if(verbosity > 0) printf("AUDIO: (pulseaudio) stream terminated(%i)\n", audio_ctx->stream_flag); pa_stream_disconnect (recordstream); pa_stream_unref (recordstream); finish(pa_ctx, pa_ml); return ((void *) 0); }
// Deferred-event handler: reconciles the desired audio configuration (from
// settings and the current AudioInput/AudioOutput instances) with the actual
// PulseAudio playback, record and echo streams.  Each of the three sections
// below follows the same pattern: decide do_stop/do_start from the stream's
// current state and the cached settings, then disconnect or (re)connect.
// Runs only after the source/sink/server enumerations have all completed.
void PulseAudioSystem::eventCallback(pa_mainloop_api *api, pa_defer_event *) {
	api->defer_enable(pade, false);

	if (! bSourceDone || ! bSinkDone || ! bServerDone)
		return;

	AudioInputPtr ai = g.ai;
	AudioOutputPtr ao = g.ao;
	AudioInput *raw_ai = ai.get();
	AudioOutput *raw_ao = ao.get();
	// Only PulseAudio-backed transports are managed by this callback.
	PulseAudioInput *pai = dynamic_cast<PulseAudioInput *>(raw_ai);
	PulseAudioOutput *pao = dynamic_cast<PulseAudioOutput *>(raw_ao);

	// ---- playback stream ----
	if (raw_ao) {
		QString odev = outputDevice();
		pa_stream_state ost = pasOutput ? pa_stream_get_state(pasOutput) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if (! pao && (ost == PA_STREAM_READY)) {
			// output is no longer pulse-backed: tear the stream down
			do_stop = true;
		} else if (pao) {
			switch (ost) {
				case PA_STREAM_TERMINATED: {
					if (pasOutput)
						pa_stream_unref(pasOutput);

					pa_sample_spec pss = qhSpecMap.value(odev);
					pa_channel_map pcm = qhChanMap.value(odev);
					// normalize the spec to something we can produce
					if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
						pss.format = PA_SAMPLE_FLOAT32NE;
					if (pss.rate == 0)
						pss.rate = SAMPLE_RATE;
					if ((pss.channels == 0) || (! g.s.doPositionalAudio()))
						pss.channels = 1;

					pasOutput = pa_stream_new(pacContext, mumble_sink_input, &pss, (pss.channels == 1) ? NULL : &pcm);
					pa_stream_set_state_callback(pasOutput, stream_callback, this);
					pa_stream_set_write_callback(pasOutput, write_callback, this);
				}
				// NOTE: deliberate fall-through — a freshly created
				// stream is UNCONNECTED and must be started below.
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
					// restart when any setting the stream depends on changed
					if (g.s.iOutputDelay != iDelayCache) {
						do_stop = true;
					} else if (g.s.doPositionalAudio() != bPositionalCache) {
						do_stop = true;
					} else if (odev != qsOutputCache) {
						do_stop = true;
					}
					break;
				}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping output");
			pa_stream_disconnect(pasOutput);
			iSinkId = -1;
		} else if (do_start) {
			qWarning("PulseAudio: Starting output: %s", qPrintable(odev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasOutput);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			// one mumble frame, rescaled to the stream's rate/channels
			const unsigned int iBlockLen = ((pao->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen * (g.s.iOutputDelay+1);
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			// remember the settings this stream was built with
			iDelayCache = g.s.iOutputDelay;
			bPositionalCache = g.s.doPositionalAudio();
			qsOutputCache = odev;

			pa_stream_connect_playback(pasOutput, qPrintable(odev), &buff, PA_STREAM_ADJUST_LATENCY, NULL, NULL);
			pa_context_get_sink_info_by_name(pacContext, qPrintable(odev), sink_info_callback, this);
		}
	}

	// ---- microphone record stream ----
	if (raw_ai) {
		QString idev = inputDevice();
		pa_stream_state ist = pasInput ? pa_stream_get_state(pasInput) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if (! pai && (ist == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pai) {
			switch (ist) {
				case PA_STREAM_TERMINATED: {
					if (pasInput)
						pa_stream_unref(pasInput);

					pa_sample_spec pss = qhSpecMap.value(idev);
					if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
						pss.format = PA_SAMPLE_FLOAT32NE;
					if (pss.rate == 0)
						pss.rate = SAMPLE_RATE;
					// microphone capture is always mono
					pss.channels = 1;

					pasInput = pa_stream_new(pacContext, "Microphone", &pss, NULL);
					pa_stream_set_state_callback(pasInput, stream_callback, this);
					pa_stream_set_read_callback(pasInput, read_callback, this);
				}
				// deliberate fall-through, as in the output section
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
					if (idev != qsInputCache) {
						do_stop = true;
					}
					break;
				}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping input");
			pa_stream_disconnect(pasInput);
		} else if (do_start) {
			qWarning("PulseAudio: Starting input %s",qPrintable(idev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasInput);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			const unsigned int iBlockLen = ((pai->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen;
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			qsInputCache = idev;

			pa_stream_connect_record(pasInput, qPrintable(idev), &buff, PA_STREAM_ADJUST_LATENCY);
		}
	}

	// ---- echo-cancellation record stream (monitors the output device) ----
	if (raw_ai) {
		QString odev = outputDevice();
		QString edev = qhEchoMap.value(odev);
		pa_stream_state est = pasSpeaker ? pa_stream_get_state(pasSpeaker) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if ((! pai || ! g.s.doEcho()) && (est == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pai && g.s.doEcho()) {
			switch (est) {
				case PA_STREAM_TERMINATED: {
					if (pasSpeaker)
						pa_stream_unref(pasSpeaker);

					pa_sample_spec pss = qhSpecMap.value(edev);
					pa_channel_map pcm = qhChanMap.value(edev);
					if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
						pss.format = PA_SAMPLE_FLOAT32NE;
					if (pss.rate == 0)
						pss.rate = SAMPLE_RATE;
					if ((pss.channels == 0) || (! g.s.bEchoMulti))
						pss.channels = 1;

					pasSpeaker = pa_stream_new(pacContext, mumble_echo, &pss, (pss.channels == 1) ? NULL : &pcm);
					pa_stream_set_state_callback(pasSpeaker, stream_callback, this);
					pa_stream_set_read_callback(pasSpeaker, read_callback, this);
				}
				// deliberate fall-through, as above
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
					if (g.s.bEchoMulti != bEchoMultiCache) {
						do_stop = true;
					} else if (edev != qsEchoCache) {
						do_stop = true;
					}
					break;
				}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping echo");
			pa_stream_disconnect(pasSpeaker);
		} else if (do_start) {
			qWarning("PulseAudio: Starting echo: %s", qPrintable(edev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasSpeaker);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			// NOTE(review): sized from pai->iFrameSize (the input side),
			// presumably so echo frames line up with mic frames — confirm.
			const unsigned int iBlockLen = ((pai->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen;
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			bEchoMultiCache = g.s.bEchoMulti;
			qsEchoCache = edev;

			pa_stream_connect_record(pasSpeaker, qPrintable(edev), &buff, PA_STREAM_ADJUST_LATENCY);
		}
	}
}
/* This is called whenever the context status changes.
 *
 * Once the context is READY the global stream is created, all of its
 * callbacks are installed, buffer attributes are derived from the
 * latency/process-time options, and the stream is connected for either
 * playback or recording depending on 'mode'.  Termination quits with 0;
 * any failure quits with 1 via the 'fail' path. */
static void context_state_callback(pa_context *c, void *userdata) {
    pa_assert(c);

    switch (pa_context_get_state(c)) {
        /* transient connection states: nothing to do yet */
        case PA_CONTEXT_CONNECTING:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
            break;

        case PA_CONTEXT_READY: {
            pa_buffer_attr buffer_attr;

            pa_assert(c);
            /* READY is only entered once, so no stream may exist yet */
            pa_assert(!stream);

            if (verbose)
                pa_log(_("Connection established.%s"), CLEAR_LINE);

            if (!(stream = pa_stream_new_with_proplist(c, NULL, &sample_spec, &channel_map, proplist))) {
                pa_log(_("pa_stream_new() failed: %s"), pa_strerror(pa_context_errno(c)));
                goto fail;
            }

            pa_stream_set_state_callback(stream, stream_state_callback, NULL);
            pa_stream_set_write_callback(stream, stream_write_callback, NULL);
            pa_stream_set_read_callback(stream, stream_read_callback, NULL);
            pa_stream_set_suspended_callback(stream, stream_suspended_callback, NULL);
            pa_stream_set_moved_callback(stream, stream_moved_callback, NULL);
            pa_stream_set_underflow_callback(stream, stream_underflow_callback, NULL);
            pa_stream_set_overflow_callback(stream, stream_overflow_callback, NULL);
            pa_stream_set_started_callback(stream, stream_started_callback, NULL);
            pa_stream_set_event_callback(stream, stream_event_callback, NULL);
            pa_stream_set_buffer_attr_callback(stream, stream_buffer_attr_callback, NULL);

            /* derive buffer sizes from the latency options: -msec variants
             * are converted via the sample spec, raw variants are bytes */
            pa_zero(buffer_attr);
            buffer_attr.maxlength = (uint32_t) -1;
            buffer_attr.prebuf = (uint32_t) -1;

            if (latency_msec > 0) {
                buffer_attr.fragsize = buffer_attr.tlength = pa_usec_to_bytes(latency_msec * PA_USEC_PER_MSEC, &sample_spec);
                flags |= PA_STREAM_ADJUST_LATENCY;
            } else if (latency > 0) {
                buffer_attr.fragsize = buffer_attr.tlength = (uint32_t) latency;
                flags |= PA_STREAM_ADJUST_LATENCY;
            } else
                buffer_attr.fragsize = buffer_attr.tlength = (uint32_t) -1;

            if (process_time_msec > 0) {
                buffer_attr.minreq = pa_usec_to_bytes(process_time_msec * PA_USEC_PER_MSEC, &sample_spec);
            } else if (process_time > 0)
                buffer_attr.minreq = (uint32_t) process_time;
            else
                buffer_attr.minreq = (uint32_t) -1;

            if (mode == PLAYBACK) {
                pa_cvolume cv;
                if (pa_stream_connect_playback(stream, device, &buffer_attr, flags, volume_is_set ? pa_cvolume_set(&cv, sample_spec.channels, volume) : NULL, NULL) < 0) {
                    pa_log(_("pa_stream_connect_playback() failed: %s"), pa_strerror(pa_context_errno(c)));
                    goto fail;
                }
            } else {
                /* NOTE(review): the record path passes the buffer
                 * attributes only when 'latency' is set; a configuration
                 * using only latency_msec is ignored here — confirm this
                 * is intended. */
                if (pa_stream_connect_record(stream, device, latency > 0 ? &buffer_attr : NULL, flags) < 0) {
                    pa_log(_("pa_stream_connect_record() failed: %s"), pa_strerror(pa_context_errno(c)));
                    goto fail;
                }
            }
            break;
        }

        case PA_CONTEXT_TERMINATED:
            /* clean disconnect requested */
            quit(0);
            break;

        case PA_CONTEXT_FAILED:
        default:
            pa_log(_("Connection failure: %s"), pa_strerror(pa_context_errno(c)));
            goto fail;
    }

    return;

fail:
    quit(1);
}
/*
 * Context state callback: once the context is READY, create the record
 * and/or playback streams (as configured by conn->conf.in / .out channel
 * counts) and connect them with latency-sized buffer attributes.
 */
static void conn_state(pa_context *context, void *arg)
{
	int err;
	struct pulse_conn_t *conn = arg;
	pa_context_state_t state = pa_context_get_state(context);

	switch(state) {
	case PA_CONTEXT_UNCONNECTED:
	case PA_CONTEXT_CONNECTING:
		break;
	case PA_CONTEXT_AUTHORIZING:
		break;
	case PA_CONTEXT_SETTING_NAME:
		break;
	case PA_CONTEXT_FAILED:
	case PA_CONTEXT_TERMINATED:
		break;
	case PA_CONTEXT_READY:
		{
			pa_sample_spec spec;
			pa_buffer_attr attr;

			spec.rate = conn->conf.rate;
			spec.format = PA_SAMPLE_FLOAT32NE;

			/* capture stream only when input channels are configured */
			if(conn->conf.in > 0) {
				spec.channels = conn->conf.in;
				conn->record = pa_stream_new(conn->context, "Record", &spec, NULL);
				pa_stream_set_read_callback(conn->record, conn_record, conn);
				pa_stream_set_overflow_callback(conn->record, conn_overflow, conn);
			}
			else
				conn->record = NULL;

			/* playback stream only when output channels are configured */
			if(conn->conf.out > 0) {
				spec.channels = conn->conf.out;
				conn->playback = pa_stream_new(conn->context, "Playback", &spec, NULL);
				pa_stream_set_write_callback(conn->playback, conn_playback, conn);
				pa_stream_set_underflow_callback(conn->playback, conn_underflow, conn);
			}
			else
				conn->playback = NULL;

			/* BUG FIX: the original assigned attr.maxlength twice in a
			 * row ((uint32_t)-1 immediately overwritten); the dead
			 * store is removed, keeping the effective value. */
			attr.fragsize = sizeof(float) * conn->lat;
			attr.maxlength = sizeof(float) * conn->lat;
			attr.minreq = 0;
			attr.prebuf = 0;
			attr.tlength = sizeof(float) * conn->lat;

			if(conn->record != NULL) {
				err = pa_stream_connect_record(conn->record, NULL, &attr,
					PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE);
				if(err < 0)
					fprintf(stderr, "Failed to connect to recorder."), exit(1);
			}

			if(conn->playback != NULL) {
				err = pa_stream_connect_playback(conn->playback, NULL, &attr,
					PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE,
					NULL, NULL);
				if(err < 0)
					fprintf(stderr, "Failed to connect to playback."), exit(1);
			}
		}
		break;
	}
}
/* Open the PulseAudio capture demuxer: connect to the server, create a
 * record stream on the source named by the location (or the default
 * source), register an audio ES with the negotiated format, and resize the
 * buffer attributes once the actual sample spec is known.
 * Returns VLC_SUCCESS, VLC_ENOMEM or VLC_EGENERIC. */
static int Open(vlc_object_t *obj)
{
    demux_t *demux = (demux_t *)obj;

    demux_sys_t *sys = malloc(sizeof (*sys));
    if (unlikely(sys == NULL))
        return VLC_ENOMEM;

    sys->context = vlc_pa_connect(obj, &sys->mainloop);
    if (sys->context == NULL) {
        free(sys);
        return VLC_EGENERIC;
    }

    sys->stream = NULL;
    sys->es = NULL;
    sys->discontinuity = false;
    /* caching value in microseconds (option is in milliseconds) */
    sys->caching = INT64_C(1000) * var_InheritInteger(obj, "live-caching");
    demux->p_sys = sys;

    /* Stream parameters — only a starting point: the FIX_FORMAT/FIX_RATE
     * flags below let the server override format and rate. */
    struct pa_sample_spec ss;
    ss.format = PA_SAMPLE_S16NE;
    ss.rate = 48000;
    ss.channels = 2;
    assert(pa_sample_spec_valid(&ss));

    struct pa_channel_map map;
    map.channels = 2;
    map.map[0] = PA_CHANNEL_POSITION_FRONT_LEFT;
    map.map[1] = PA_CHANNEL_POSITION_FRONT_RIGHT;
    assert(pa_channel_map_valid(&map));

    const pa_stream_flags_t flags = PA_STREAM_INTERPOLATE_TIMING
                                  | PA_STREAM_AUTO_TIMING_UPDATE
                                  | PA_STREAM_ADJUST_LATENCY
                                  | PA_STREAM_FIX_FORMAT
                                  | PA_STREAM_FIX_RATE
                                  /*| PA_STREAM_FIX_CHANNELS*/;

    /* the MRL location, if any, names the source device */
    const char *dev = NULL;
    if (demux->psz_location != NULL && demux->psz_location[0] != '\0')
        dev = demux->psz_location;

    /* half the caching time worth of data per fragment */
    struct pa_buffer_attr attr = {
        .maxlength = -1,
        .fragsize = pa_usec_to_bytes(sys->caching, &ss) / 2,
    };

    es_format_t fmt;

    /* Create record stream */
    pa_stream *s;
    pa_operation *op;

    pa_threaded_mainloop_lock(sys->mainloop);
    s = pa_stream_new(sys->context, "audio stream", &ss, &map);
    if (s == NULL)
        goto error;

    sys->stream = s;
    pa_stream_set_state_callback(s, stream_state_cb, sys->mainloop);
    pa_stream_set_read_callback(s, stream_read_cb, demux);
    pa_stream_set_buffer_attr_callback(s, stream_buffer_attr_cb, demux);
    pa_stream_set_moved_callback(s, stream_moved_cb, demux);
    pa_stream_set_overflow_callback(s, stream_overflow_cb, demux);
    pa_stream_set_started_callback(s, stream_started_cb, demux);
    pa_stream_set_suspended_callback(s, stream_suspended_cb, demux);
    pa_stream_set_underflow_callback(s, stream_underflow_cb, demux);

    if (pa_stream_connect_record(s, dev, &attr, flags) < 0
     || stream_wait(s, sys->mainloop)) {
        vlc_pa_error(obj, "cannot connect record stream", sys->context);
        goto error;
    }

    /* The ES should be initialized before stream_read_cb(), but how? */
    const struct pa_sample_spec *pss = pa_stream_get_sample_spec(s);
    if ((unsigned)pss->format >= sizeof (fourccs) / sizeof (fourccs[0])) {
        msg_Err(obj, "unknown PulseAudio sample format %u",
                (unsigned)pss->format);
        goto error;
    }

    vlc_fourcc_t format = fourccs[pss->format];
    if (format == 0) { /* FIXME: should renegotiate something else */
        msg_Err(obj, "unsupported PulseAudio sample format %u",
                (unsigned)pss->format);
        goto error;
    }

    es_format_Init(&fmt, AUDIO_ES, format);
    fmt.audio.i_physical_channels = fmt.audio.i_original_channels =
        AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT;
    fmt.audio.i_channels = ss.channels;
    fmt.audio.i_rate = pss->rate;
    fmt.audio.i_bitspersample = aout_BitsPerSample(format);
    fmt.audio.i_blockalign = fmt.audio.i_bitspersample * ss.channels / 8;
    fmt.i_bitrate = fmt.audio.i_bitspersample * ss.channels * pss->rate;
    sys->framesize = fmt.audio.i_blockalign;
    sys->es = es_out_Add (demux->out, &fmt);

    /* Update the buffer attributes according to actual format */
    attr.fragsize = pa_usec_to_bytes(sys->caching, pss) / 2;
    op = pa_stream_set_buffer_attr(s, &attr, stream_success_cb, sys->mainloop);
    if (likely(op != NULL)) {
        /* wait for the server to acknowledge the new attributes */
        while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
            pa_threaded_mainloop_wait(sys->mainloop);
        pa_operation_unref(op);
    }
    stream_buffer_attr_cb(s, demux);
    pa_threaded_mainloop_unlock(sys->mainloop);

    demux->pf_demux = NULL;
    demux->pf_control = Control;
    return VLC_SUCCESS;

error:
    pa_threaded_mainloop_unlock(sys->mainloop);
    Close(obj);
    return VLC_EGENERIC;
}

/* Release everything acquired by Open(): disconnect the stream, detach its
 * callbacks, drop the reference, and close the server connection. */
static void Close (vlc_object_t *obj)
{
    demux_t *demux = (demux_t *)obj;
    demux_sys_t *sys = demux->p_sys;
    pa_stream *s = sys->stream;

    if (likely(s != NULL)) {
        pa_threaded_mainloop_lock(sys->mainloop);
        pa_stream_disconnect(s);
        /* detach all callbacks before unref so none fire during teardown */
        pa_stream_set_state_callback(s, NULL, NULL);
        pa_stream_set_read_callback(s, NULL, NULL);
        pa_stream_set_buffer_attr_callback(s, NULL, NULL);
        pa_stream_set_moved_callback(s, NULL, NULL);
        pa_stream_set_overflow_callback(s, NULL, NULL);
        pa_stream_set_started_callback(s, NULL, NULL);
        pa_stream_set_suspended_callback(s, NULL, NULL);
        pa_stream_set_underflow_callback(s, NULL, NULL);
        pa_stream_unref(s);
        pa_threaded_mainloop_unlock(sys->mainloop);
    }
    vlc_pa_disconnect(obj, sys->context, sys->mainloop);
    free(sys);
}
bool Init() { int ret = 0; mPMainLoop = pa_threaded_mainloop_new(); pa_mainloop_api *mlapi = pa_threaded_mainloop_get_api(mPMainLoop); mPContext = pa_context_new (mlapi, "USBqemu-pulse"); ret = pa_context_connect (mPContext, mServer, PA_CONTEXT_NOFLAGS, NULL ); OSDebugOut("pa_context_connect %s\n", pa_strerror(ret)); if (ret != PA_OK) goto error; pa_context_set_state_callback(mPContext, pa_context_state_cb, &mPAready ); pa_threaded_mainloop_start(mPMainLoop); // wait for pa_context_state_cb for(;;) { if(mPAready == 1) break; if(mPAready == 2 || mQuit) goto error; } mStream = pa_stream_new(mPContext, "USBqemu-pulse", &mSSpec, NULL ); pa_stream_set_read_callback(mStream, stream_read_cb, this ); // Sets individual read callback fragsize but recording itself // still "lags" ~1sec (read_cb is called in bursts) without // PA_STREAM_ADJUST_LATENCY pa_buffer_attr buffer_attr; buffer_attr.maxlength = (uint32_t) -1; buffer_attr.tlength = (uint32_t) -1; buffer_attr.prebuf = (uint32_t) -1; buffer_attr.minreq = (uint32_t) -1; buffer_attr.fragsize = pa_usec_to_bytes(mBuffering * 1000, &mSSpec); OSDebugOut("usec_to_bytes %zu\n", buffer_attr.fragsize); ret = pa_stream_connect_record(mStream, mDevice.c_str(), &buffer_attr, PA_STREAM_ADJUST_LATENCY ); OSDebugOut("pa_stream_connect_record %s\n", pa_strerror(ret)); if (ret != PA_OK) goto error; // Setup resampler if (mResampler) mResampler = src_delete(mResampler); mResampler = src_new(SRC_SINC_FASTEST, mSSpec.channels, &ret); if (!mResampler) { OSDebugOut("Failed to create resampler: error %08X\n", ret); goto error; } mLastGetBuffer = hrc::now(); return true; error: Uninit(); return false; }
/* Open the PulseAudio audio-input device: create a record stream on the
 * configured source, wire up the state/read callbacks, and wait (on the
 * threaded mainloop) until the stream reaches PA_STREAM_READY.
 * On success allocates the packet staging buffer; on failure the stream is
 * torn down via audin_pulse_close(). */
static void audin_pulse_open(IAudinDevice* device, AudinReceive receive, void* user_data)
{
	pa_stream_state_t state;
	pa_buffer_attr buffer_attr = { 0 };
	AudinPulseDevice* pulse = (AudinPulseDevice*) device;

	/* Need a live context and a negotiated sample rate; bail out if a
	 * stream already exists. */
	if (!pulse->context)
		return;
	if (!pulse->sample_spec.rate || pulse->stream)
		return;

	DEBUG_DVC("");

	pulse->receive = receive;
	pulse->user_data = user_data;

	pa_threaded_mainloop_lock(pulse->mainloop);
	pulse->stream = pa_stream_new(pulse->context, "freerdp_audin",
		&pulse->sample_spec, NULL);
	if (!pulse->stream)
	{
		pa_threaded_mainloop_unlock(pulse->mainloop);
		DEBUG_DVC("pa_stream_new failed (%d)",
			pa_context_errno(pulse->context));
		return;
	}
	pulse->bytes_per_frame = pa_frame_size(&pulse->sample_spec);
	pa_stream_set_state_callback(pulse->stream,
		audin_pulse_stream_state_callback, pulse);
	pa_stream_set_read_callback(pulse->stream,
		audin_pulse_stream_request_callback, pulse);
	buffer_attr.maxlength = (uint32_t) -1;
	buffer_attr.tlength = (uint32_t) -1;
	buffer_attr.prebuf = (uint32_t) -1;
	buffer_attr.minreq = (uint32_t) -1;
	/* 500ms latency */
	buffer_attr.fragsize = pa_usec_to_bytes(500000, &pulse->sample_spec);
	if (pa_stream_connect_record(pulse->stream,
		pulse->device_name[0] ? pulse->device_name : NULL,
		&buffer_attr, PA_STREAM_ADJUST_LATENCY) < 0)
	{
		pa_threaded_mainloop_unlock(pulse->mainloop);
		/* FIX: message previously said "pa_stream_connect_playback",
		 * which does not match the failing call. */
		DEBUG_WARN("pa_stream_connect_record failed (%d)",
			pa_context_errno(pulse->context));
		return;
	}

	/* Wait until the state callback signals READY (or a terminal state). */
	for (;;)
	{
		state = pa_stream_get_state(pulse->stream);
		if (state == PA_STREAM_READY)
			break;
		if (!PA_STREAM_IS_GOOD(state))
		{
			DEBUG_WARN("bad stream state (%d)",
				pa_context_errno(pulse->context));
			break;
		}
		pa_threaded_mainloop_wait(pulse->mainloop);
	}
	pa_threaded_mainloop_unlock(pulse->mainloop);

	if (state == PA_STREAM_READY)
	{
		memset(&pulse->adpcm, 0, sizeof(ADPCM));
		/* One packet's worth of frames, zero-initialized. */
		pulse->buffer = xzalloc(pulse->bytes_per_frame * pulse->frames_per_packet);
		pulse->buffer_frames = 0;
		DEBUG_DVC("connected");
	}
	else
	{
		audin_pulse_close(device);
	}
}
/*
 * Create and configure the PulseAudio record stream for the given caps
 * (GStreamer 0.10 ring-buffer path).  Parses the caps into a ring-buffer
 * spec, derives a channel map, creates the stream (with the element's
 * proplist when present) and installs the stream callbacks.
 * Returns TRUE on success, FALSE on failure (with an element error posted).
 */
static gboolean
gst_pulsesrc_create_stream (GstPulseSrc * pulsesrc, GstCaps * caps)
{
  pa_channel_map channel_map;
  GstStructure *s;
  gboolean need_channel_layout = FALSE;
  GstRingBufferSpec spec;
  const gchar *name;

  memset (&spec, 0, sizeof (GstRingBufferSpec));
  spec.latency_time = GST_SECOND;
  if (!gst_ring_buffer_parse_caps (&spec, caps)) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, SETTINGS,
        ("Can't parse caps."), (NULL));
    goto fail;
  }
  /* Keep the refcount of the caps at 1 to make them writable */
  gst_caps_unref (spec.caps);

  if (!gst_pulse_fill_sample_spec (&spec, &pulsesrc->sample_spec)) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, SETTINGS,
        ("Invalid sample specification."), (NULL));
    goto fail;
  }

  pa_threaded_mainloop_lock (pulsesrc->mainloop);
  if (!pulsesrc->context) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("Bad context"), (NULL));
    goto unlock_and_fail;
  }

  /* No usable channel layout in the caps: mono/stereo get a default map,
   * any other channel count lets the server pick (need_channel_layout). */
  s = gst_caps_get_structure (caps, 0);
  if (!gst_structure_has_field (s, "channel-layout") ||
      !gst_pulse_gst_to_channel_map (&channel_map, &spec)) {
    if (spec.channels == 1)
      pa_channel_map_init_mono (&channel_map);
    else if (spec.channels == 2)
      pa_channel_map_init_stereo (&channel_map);
    else
      need_channel_layout = TRUE;
  }

  name = "Record Stream";
  if (pulsesrc->proplist) {
    if (!(pulsesrc->stream = pa_stream_new_with_proplist (pulsesrc->context,
                name, &pulsesrc->sample_spec,
                (need_channel_layout) ? NULL : &channel_map,
                pulsesrc->proplist))) {
      GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
          ("Failed to create stream: %s",
              pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
      goto unlock_and_fail;
    }
  } else if (!(pulsesrc->stream = pa_stream_new (pulsesrc->context,
              name, &pulsesrc->sample_spec,
              (need_channel_layout) ? NULL : &channel_map))) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("Failed to create stream: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }

  /* The server chose the layout: reflect it back into the caps. */
  if (need_channel_layout) {
    const pa_channel_map *m = pa_stream_get_channel_map (pulsesrc->stream);

    gst_pulse_channel_map_to_gst (m, &spec);
    caps = spec.caps;
  }

  GST_DEBUG_OBJECT (pulsesrc, "Caps are %" GST_PTR_FORMAT, caps);

  pa_stream_set_state_callback (pulsesrc->stream,
      gst_pulsesrc_stream_state_cb, pulsesrc);
  pa_stream_set_read_callback (pulsesrc->stream,
      gst_pulsesrc_stream_request_cb, pulsesrc);
  pa_stream_set_underflow_callback (pulsesrc->stream,
      gst_pulsesrc_stream_underflow_cb, pulsesrc);
  pa_stream_set_overflow_callback (pulsesrc->stream,
      gst_pulsesrc_stream_overflow_cb, pulsesrc);
  pa_stream_set_latency_update_callback (pulsesrc->stream,
      gst_pulsesrc_stream_latency_update_cb, pulsesrc);

  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

  return TRUE;

unlock_and_fail:
  gst_pulsesrc_destroy_stream (pulsesrc);

  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

fail:
  return FALSE;
}
/**
 * pa_simple_new(): establish a blocking "simple API" connection.
 *
 * Validates the arguments, spins up a threaded mainloop plus context,
 * waits for the context to become ready, then creates and connects a
 * playback or record stream and waits for that to become ready too.
 *
 * @param server      server address, or NULL for the default (non-empty when given)
 * @param name        application name used for the context
 * @param dir         PA_STREAM_PLAYBACK or PA_STREAM_RECORD only
 * @param dev         sink/source name, or NULL for the default
 * @param stream_name stream description
 * @param ss          sample spec (required, must be valid)
 * @param map         optional channel map (channel count must match ss)
 * @param attr        optional buffer attributes
 * @param rerror      optional out parameter: PA error code on failure
 * @return new pa_simple object, or NULL on error (with *rerror set)
 */
pa_simple* pa_simple_new(
        const char *server,
        const char *name,
        pa_stream_direction_t dir,
        const char *dev,
        const char *stream_name,
        const pa_sample_spec *ss,
        const pa_channel_map *map,
        const pa_buffer_attr *attr,
        int *rerror) {

    pa_simple *p;
    int error = PA_ERR_INTERNAL, r;

    /* Argument validation: each failed check stores PA_ERR_INVALID in
     * *rerror and returns NULL. */
    CHECK_VALIDITY_RETURN_ANY(rerror, !server || *server, PA_ERR_INVALID, NULL);
    CHECK_VALIDITY_RETURN_ANY(rerror, dir == PA_STREAM_PLAYBACK || dir == PA_STREAM_RECORD, PA_ERR_INVALID, NULL);
    CHECK_VALIDITY_RETURN_ANY(rerror, !dev || *dev, PA_ERR_INVALID, NULL);
    CHECK_VALIDITY_RETURN_ANY(rerror, ss && pa_sample_spec_valid(ss), PA_ERR_INVALID, NULL);
    CHECK_VALIDITY_RETURN_ANY(rerror, !map || (pa_channel_map_valid(map) && map->channels == ss->channels), PA_ERR_INVALID, NULL)

    p = pa_xnew0(pa_simple, 1);
    p->direction = dir;

    if (!(p->mainloop = pa_threaded_mainloop_new()))
        goto fail;

    if (!(p->context = pa_context_new(pa_threaded_mainloop_get_api(p->mainloop), name)))
        goto fail;

    pa_context_set_state_callback(p->context, context_state_cb, p);

    if (pa_context_connect(p->context, server, 0, NULL) < 0) {
        error = pa_context_errno(p->context);
        goto fail;
    }

    pa_threaded_mainloop_lock(p->mainloop);

    if (pa_threaded_mainloop_start(p->mainloop) < 0)
        goto unlock_and_fail;

    for (;;) {
        pa_context_state_t state;

        state = pa_context_get_state(p->context);

        if (state == PA_CONTEXT_READY)
            break;

        if (!PA_CONTEXT_IS_GOOD(state)) {
            error = pa_context_errno(p->context);
            goto unlock_and_fail;
        }

        /* Wait until the context is ready */
        pa_threaded_mainloop_wait(p->mainloop);
    }

    if (!(p->stream = pa_stream_new(p->context, stream_name, ss, map))) {
        error = pa_context_errno(p->context);
        goto unlock_and_fail;
    }

    pa_stream_set_state_callback(p->stream, stream_state_cb, p);
    /* The same request callback serves both directions; only the one
     * matching the connected direction will ever fire. */
    pa_stream_set_read_callback(p->stream, stream_request_cb, p);
    pa_stream_set_write_callback(p->stream, stream_request_cb, p);
    pa_stream_set_latency_update_callback(p->stream, stream_latency_update_cb, p);

    if (dir == PA_STREAM_PLAYBACK)
        r = pa_stream_connect_playback(p->stream, dev, attr,
                                       PA_STREAM_INTERPOLATE_TIMING
                                       |PA_STREAM_ADJUST_LATENCY
                                       |PA_STREAM_AUTO_TIMING_UPDATE, NULL, NULL);
    else
        r = pa_stream_connect_record(p->stream, dev, attr,
                                     PA_STREAM_INTERPOLATE_TIMING
                                     |PA_STREAM_ADJUST_LATENCY
                                     |PA_STREAM_AUTO_TIMING_UPDATE);

    if (r < 0) {
        error = pa_context_errno(p->context);
        goto unlock_and_fail;
    }

    for (;;) {
        pa_stream_state_t state;

        state = pa_stream_get_state(p->stream);

        if (state == PA_STREAM_READY)
            break;

        if (!PA_STREAM_IS_GOOD(state)) {
            error = pa_context_errno(p->context);
            goto unlock_and_fail;
        }

        /* Wait until the stream is ready */
        pa_threaded_mainloop_wait(p->mainloop);
    }

    pa_threaded_mainloop_unlock(p->mainloop);

    return p;

unlock_and_fail:
    pa_threaded_mainloop_unlock(p->mainloop);

fail:
    if (rerror)
        *rerror = error;
    pa_simple_free(p);
    return NULL;
}
int main(int argc, const char *argv[]) { pa_mainloop *pa_ml = NULL; pa_mainloop_api *pa_mlapi = NULL; pa_operation *pa_op = NULL; pa_context *pa_ctx = NULL; int pa_ready = 0; int state = 0; pa_ml = pa_mainloop_new(); pa_mlapi = pa_mainloop_get_api(pa_ml); pa_ctx = pa_context_new(pa_mlapi, "deepin"); pa_context_connect(pa_ctx, NULL, 0, NULL); pa_context_set_state_callback(pa_ctx, pa_state_cb, &pa_ready); for (;;) { if (0 == pa_ready) { pa_mainloop_iterate(pa_ml, 1, NULL); continue; } if (2 == pa_ready) { pa_context_disconnect(pa_ctx); pa_context_unref(pa_ctx); pa_mainloop_free(pa_ml); return -1; } switch (state) { case 0: if (pa_context_get_server_protocol_version (pa_ctx) < 13) { return -1; } printf("server version: %d\n", pa_context_get_server_protocol_version(pa_ctx)); pa_stream *s = NULL; pa_proplist *proplist; pa_buffer_attr attr; pa_sample_spec ss; int res; char dev_name[40]; // pa_sample_spec ss.channels = 1; ss.format = PA_SAMPLE_FLOAT32; ss.rate = 25; // pa_buffer_attr memset(&attr, 0, sizeof(attr)); attr.fragsize = sizeof(float); attr.maxlength = (uint32_t) -1; // pa_proplist proplist = pa_proplist_new (); pa_proplist_sets (proplist, PA_PROP_APPLICATION_ID, "deepin.sound"); // create new stream if (!(s = pa_stream_new_with_proplist(pa_ctx, "Peak detect", &ss, NULL, proplist))) { fprintf(stderr, "pa_stream_new error\n"); return -2; } pa_proplist_free(proplist); /*pa_stream_set_monitor_stream(s, 26);*/ pa_stream_set_read_callback(s, on_monitor_read_callback, NULL); pa_stream_set_suspended_callback(s, on_monitor_suspended_callback, NULL); res = pa_stream_connect_record(s, NULL, &attr, (pa_stream_flags_t) (PA_STREAM_DONT_MOVE |PA_STREAM_PEAK_DETECT |PA_STREAM_ADJUST_LATENCY)); if (res < 0) { fprintf(stderr, "Failed to connect monitoring stream\n"); return -3; } state++; break; case 1: usleep(100); break; case 2: return 0; break; default: return -1; } pa_mainloop_iterate(pa_ml, 1, NULL); } return 0; }
/* This is called whenever the context status changes */
/* Once the context reaches READY, creates an "interpol-test" stream
 * (playback or record depending on the global `playback` flag) with
 * optional latency/interpolation flags, and installs the stream
 * callbacks.  Aborts the check test on context failure. */
static void context_state_callback(pa_context *c, void *userdata) {
    fail_unless(c != NULL);

    switch (pa_context_get_state(c)) {
        case PA_CONTEXT_CONNECTING:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
            break;

        case PA_CONTEXT_READY: {
            pa_stream_flags_t flags = PA_STREAM_AUTO_TIMING_UPDATE;
            pa_buffer_attr attr;
            static const pa_sample_spec ss = {
                .format = PA_SAMPLE_S16LE,
                .rate = 44100,
                .channels = 2
            };

            pa_zero(attr);
            attr.maxlength = (uint32_t) -1;
            /* Only constrain the target length when a latency was requested. */
            attr.tlength = latency > 0 ? (uint32_t) pa_usec_to_bytes(latency, &ss) : (uint32_t) -1;
            attr.prebuf = (uint32_t) -1;
            attr.minreq = (uint32_t) -1;
            attr.fragsize = (uint32_t) -1;

#ifdef INTERPOLATE
            flags |= PA_STREAM_INTERPOLATE_TIMING;
#endif

            if (latency > 0)
                flags |= PA_STREAM_ADJUST_LATENCY;

            pa_log("Connection established");

            stream = pa_stream_new(c, "interpol-test", &ss, NULL);
            fail_unless(stream != NULL);

            if (playback) {
                pa_assert_se(pa_stream_connect_playback(stream, NULL, &attr, flags, NULL, NULL) == 0);
                pa_stream_set_write_callback(stream, stream_write_cb, NULL);
            } else {
                pa_assert_se(pa_stream_connect_record(stream, NULL, &attr, flags) == 0);
                pa_stream_set_read_callback(stream, stream_read_cb, NULL);
            }

            pa_stream_set_latency_update_callback(stream, stream_latency_cb, NULL);
            break;
        }

        case PA_CONTEXT_TERMINATED:
            break;

        case PA_CONTEXT_FAILED:
        default:
            pa_log_error("Context error: %s", pa_strerror(pa_context_errno(c)));
            ck_abort();
    }
}

/* Polls pa_stream_get_time() against the wall clock for ~20k iterations
 * (about 1 ms apart), logging drift between stream time and real time;
 * when CORK is defined, corks/uncorks the stream every other 2-second
 * window to exercise timing across pauses. */
START_TEST (interpol_test) {
    pa_threaded_mainloop* m = NULL;
    int k;
    struct timeval start, last_info = { 0, 0 };
    pa_usec_t old_t = 0, old_rtc = 0;
#ifdef CORK
    bool corked = false;
#endif

    /* Set up a new main loop */
    m = pa_threaded_mainloop_new();
    fail_unless(m != NULL);

    mainloop_api = pa_threaded_mainloop_get_api(m);
    fail_unless(mainloop_api != NULL);

    context = pa_context_new(mainloop_api, bname);
    fail_unless(context != NULL);

    pa_context_set_state_callback(context, context_state_callback, NULL);

    fail_unless(pa_context_connect(context, NULL, 0, NULL) >= 0);

    pa_gettimeofday(&start);

    fail_unless(pa_threaded_mainloop_start(m) >= 0);

/* #ifdef CORK */
    for (k = 0; k < 20000; k++)
/* #else */
/*     for (k = 0; k < 2000; k++) */
/* #endif */
    {
        bool success = false, changed = false;
        pa_usec_t t, rtc, d;
        struct timeval now, tv;
        bool playing = false;

        pa_threaded_mainloop_lock(m);

        /* `stream` is created asynchronously by context_state_callback. */
        if (stream) {
            const pa_timing_info *info;

            if (pa_stream_get_time(stream, &t) >= 0 &&
                pa_stream_get_latency(stream, &d, NULL) >= 0)
                success = true;

            if ((info = pa_stream_get_timing_info(stream))) {
                /* Note whether the server sent a fresh timing snapshot. */
                if (memcmp(&last_info, &info->timestamp, sizeof(struct timeval))) {
                    changed = true;
                    last_info = info->timestamp;
                }
                if (info->playing)
                    playing = true;
            }
        }

        pa_threaded_mainloop_unlock(m);

        pa_gettimeofday(&now);

        if (success) {
#ifdef CORK
            bool cork_now;
#endif
            rtc = pa_timeval_diff(&now, &start);
            pa_log_info("%i\t%llu\t%llu\t%llu\t%llu\t%lli\t%u\t%u\t%llu\t%llu\n", k,
                (unsigned long long) rtc,
                (unsigned long long) t,
                (unsigned long long) (rtc-old_rtc),
                (unsigned long long) (t-old_t),
                (signed long long) rtc - (signed long long) t,
                changed,
                playing,
                (unsigned long long) latency,
                (unsigned long long) d);
            fflush(stdout);
            old_t = t;
            old_rtc = rtc;

#ifdef CORK
            /* Toggle cork state every other 2-second window. */
            cork_now = (rtc / (2*PA_USEC_PER_SEC)) % 2 == 1;

            if (corked != cork_now) {
                pa_threaded_mainloop_lock(m);
                pa_operation_unref(pa_stream_cork(stream, cork_now, NULL, NULL));
                pa_threaded_mainloop_unlock(m);

                pa_log(cork_now ? "Corking" : "Uncorking");

                corked = cork_now;
            }
#endif
        }

        /* Spin loop, ugly but normal usleep() is just too badly grained */
        tv = now;
        while (pa_timeval_diff(pa_gettimeofday(&now), &tv) < 1000)
            pa_thread_yield();
    }

    if (m)
        pa_threaded_mainloop_stop(m);

    if (stream) {
        pa_stream_disconnect(stream);
        pa_stream_unref(stream);
    }

    if (context) {
        pa_context_disconnect(context);
        pa_context_unref(context);
    }

    if (m)
        pa_threaded_mainloop_free(m);
}
END_TEST

/* Check-framework driver: parses an optional "-r" (record mode) flag and
 * an optional latency argument, then runs the interpol test suite with a
 * 5-minute timeout. */
int main(int argc, char *argv[]) {
    int failed = 0;
    Suite *s;
    TCase *tc;
    SRunner *sr;

    if (!getenv("MAKE_CHECK"))
        pa_log_set_level(PA_LOG_DEBUG);

    bname = argv[0];

    playback = argc <= 1 || !pa_streq(argv[1], "-r");

    latency =
        (argc >= 2 && !pa_streq(argv[1], "-r")) ? atoi(argv[1]) :
        (argc >= 3 ? atoi(argv[2]) : 0);

    s = suite_create("Interpol");
    tc = tcase_create("interpol");
    tcase_add_test(tc, interpol_test);
    tcase_set_timeout(tc, 5 * 60);
    suite_add_tcase(s, tc);

    sr = srunner_create(s);
    srunner_run_all(sr, CK_NORMAL);
    failed = srunner_ntests_failed(sr);
    srunner_free(sr);

    return (failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
}
/*
 * Create the PulseAudio record stream for *caps (GStreamer 1.0 path).
 * May rewrite *caps: a channel-mask is filled in for mono/stereo defaults,
 * and after stream creation the caps are regenerated from the channel map
 * the server actually chose.
 * Returns TRUE on success, FALSE (with an element error posted) otherwise.
 */
static gboolean
gst_pulsesrc_create_stream (GstPulseSrc * pulsesrc, GstCaps ** caps)
{
  pa_channel_map channel_map;
  const pa_channel_map *m;
  GstStructure *s;
  gboolean need_channel_layout = FALSE;
  GstAudioRingBufferSpec spec;
  const gchar *name;

  /* Peek the channel count to pick a default channel map when the caps
   * carry no channel-mask. */
  s = gst_caps_get_structure (*caps, 0);
  gst_structure_get_int (s, "channels", &spec.info.channels);
  if (!gst_structure_has_field (s, "channel-mask")) {
    if (spec.info.channels == 1) {
      pa_channel_map_init_mono (&channel_map);
    } else if (spec.info.channels == 2) {
      gst_structure_set (s, "channel-mask", GST_TYPE_BITMASK,
          GST_AUDIO_CHANNEL_POSITION_MASK (FRONT_LEFT) |
          GST_AUDIO_CHANNEL_POSITION_MASK (FRONT_RIGHT), NULL);
      pa_channel_map_init_stereo (&channel_map);
    } else {
      need_channel_layout = TRUE;
      gst_structure_set (s, "channel-mask", GST_TYPE_BITMASK,
          G_GUINT64_CONSTANT (0), NULL);
    }
  }

  /* NOTE(review): this memset wipes spec.info.channels read above; the
   * value was only needed to select the default map, and parse_caps
   * refills the spec below -- confirm that ordering is intentional. */
  memset (&spec, 0, sizeof (GstAudioRingBufferSpec));
  spec.latency_time = GST_SECOND;
  if (!gst_audio_ring_buffer_parse_caps (&spec, *caps))
    goto invalid_caps;

  /* Keep the refcount of the caps at 1 to make them writable */
  gst_caps_unref (spec.caps);

  if (!need_channel_layout
      && !gst_pulse_gst_to_channel_map (&channel_map, &spec)) {
    need_channel_layout = TRUE;
    gst_structure_set (s, "channel-mask", GST_TYPE_BITMASK,
        G_GUINT64_CONSTANT (0), NULL);
    memset (spec.info.position, 0xff, sizeof (spec.info.position));
  }

  if (!gst_pulse_fill_sample_spec (&spec, &pulsesrc->sample_spec))
    goto invalid_spec;

  pa_threaded_mainloop_lock (pulsesrc->mainloop);

  if (!pulsesrc->context)
    goto bad_context;

  name = "Record Stream";
  if (pulsesrc->proplist) {
    if (!(pulsesrc->stream = pa_stream_new_with_proplist (pulsesrc->context,
                name, &pulsesrc->sample_spec,
                (need_channel_layout) ? NULL : &channel_map,
                pulsesrc->proplist)))
      goto create_failed;
  } else if (!(pulsesrc->stream = pa_stream_new (pulsesrc->context,
              name, &pulsesrc->sample_spec,
              (need_channel_layout) ? NULL : &channel_map)))
    goto create_failed;

  /* Regenerate the caps from the channel map the server actually used. */
  m = pa_stream_get_channel_map (pulsesrc->stream);
  gst_pulse_channel_map_to_gst (m, &spec);
  gst_audio_channel_positions_to_valid_order (spec.info.position,
      spec.info.channels);
  gst_caps_unref (*caps);
  *caps = gst_audio_info_to_caps (&spec.info);

  GST_DEBUG_OBJECT (pulsesrc, "Caps are %" GST_PTR_FORMAT, *caps);

  pa_stream_set_state_callback (pulsesrc->stream,
      gst_pulsesrc_stream_state_cb, pulsesrc);
  pa_stream_set_read_callback (pulsesrc->stream,
      gst_pulsesrc_stream_request_cb, pulsesrc);
  pa_stream_set_underflow_callback (pulsesrc->stream,
      gst_pulsesrc_stream_underflow_cb, pulsesrc);
  pa_stream_set_overflow_callback (pulsesrc->stream,
      gst_pulsesrc_stream_overflow_cb, pulsesrc);
  pa_stream_set_latency_update_callback (pulsesrc->stream,
      gst_pulsesrc_stream_latency_update_cb, pulsesrc);

  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

  return TRUE;

  /* ERRORS */
invalid_caps:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, SETTINGS,
        ("Can't parse caps."), (NULL));
    goto fail;
  }
invalid_spec:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, SETTINGS,
        ("Invalid sample specification."), (NULL));
    goto fail;
  }
bad_context:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("Bad context"), (NULL));
    goto unlock_and_fail;
  }
create_failed:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("Failed to create stream: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }
unlock_and_fail:
  {
    gst_pulsesrc_destroy_stream (pulsesrc);

    pa_threaded_mainloop_unlock (pulsesrc->mainloop);
  fail:
    return FALSE;
  }
}
bool QPulseAudioInput::open() { if (m_opened) return true; QPulseAudioEngine *pulseEngine = QPulseAudioEngine::instance(); if (!pulseEngine->context() || pa_context_get_state(pulseEngine->context()) != PA_CONTEXT_READY) { setError(QAudio::FatalError); setState(QAudio::StoppedState); return false; } pa_sample_spec spec = QPulseAudioInternal::audioFormatToSampleSpec(m_format); if (!pa_sample_spec_valid(&spec)) { setError(QAudio::OpenError); setState(QAudio::StoppedState); return false; } m_spec = spec; #ifdef DEBUG_PULSE // QTime now(QTime::currentTime()); // qDebug()<<now.second()<<"s "<<now.msec()<<"ms :open()"; #endif if (m_streamName.isNull()) m_streamName = QString(QLatin1String("QtmPulseStream-%1-%2")).arg(::getpid()).arg(quintptr(this)).toUtf8(); #ifdef DEBUG_PULSE qDebug() << "Format: " << QPulseAudioInternal::sampleFormatToQString(spec.format); qDebug() << "Rate: " << spec.rate; qDebug() << "Channels: " << spec.channels; qDebug() << "Frame size: " << pa_frame_size(&spec); #endif pulseEngine->lock(); pa_channel_map channel_map; pa_channel_map_init_extend(&channel_map, spec.channels, PA_CHANNEL_MAP_DEFAULT); if (!pa_channel_map_compatible(&channel_map, &spec)) qWarning() << "Channel map doesn't match sample specification!"; m_stream = pa_stream_new(pulseEngine->context(), m_streamName.constData(), &spec, &channel_map); pa_stream_set_state_callback(m_stream, inputStreamStateCallback, this); pa_stream_set_read_callback(m_stream, inputStreamReadCallback, this); pa_stream_set_underflow_callback(m_stream, inputStreamUnderflowCallback, this); pa_stream_set_overflow_callback(m_stream, inputStreamOverflowCallback, this); m_periodSize = pa_usec_to_bytes(PeriodTimeMs*1000, &spec); int flags = 0; pa_buffer_attr buffer_attr; buffer_attr.maxlength = (uint32_t) -1; buffer_attr.prebuf = (uint32_t) -1; buffer_attr.tlength = (uint32_t) -1; buffer_attr.minreq = (uint32_t) -1; flags |= PA_STREAM_ADJUST_LATENCY; if (m_bufferSize > 0) buffer_attr.fragsize = (uint32_t) 
m_bufferSize; else buffer_attr.fragsize = (uint32_t) m_periodSize; if (pa_stream_connect_record(m_stream, m_device.data(), &buffer_attr, (pa_stream_flags_t)flags) < 0) { qWarning() << "pa_stream_connect_record() failed!"; pa_stream_unref(m_stream); m_stream = 0; pulseEngine->unlock(); setError(QAudio::OpenError); setState(QAudio::StoppedState); return false; } while (pa_stream_get_state(m_stream) != PA_STREAM_READY) pa_threaded_mainloop_wait(pulseEngine->mainloop()); const pa_buffer_attr *actualBufferAttr = pa_stream_get_buffer_attr(m_stream); m_periodSize = actualBufferAttr->fragsize; m_periodTime = pa_bytes_to_usec(m_periodSize, &spec) / 1000; if (actualBufferAttr->tlength != (uint32_t)-1) m_bufferSize = actualBufferAttr->tlength; pulseEngine->unlock(); connect(pulseEngine, &QPulseAudioEngine::contextFailed, this, &QPulseAudioInput::onPulseContextFailed); m_opened = true; m_timer->start(m_periodTime); m_clockStamp.restart(); m_timeStamp.restart(); m_elapsedTimeOffset = 0; m_totalTimeValue = 0; return true; }
/*
 * Allocate and initialise a krad_pulse_t client: connects to the default
 * PulseAudio server, creates playback and/or capture streams (float32
 * stereo at 44.1 kHz, per kradaudio->direction), and spawns the mainloop
 * thread.  All failures are fatal (exit(1)).
 */
krad_pulse_t *kradpulse_create(krad_audio_t *kradaudio) {

	krad_pulse_t *kradpulse;

	if ((kradpulse = calloc (1, sizeof (krad_pulse_t))) == NULL) {
		fprintf(stderr, "mem alloc fail\n");
		exit (1);
	}

	kradpulse->kradaudio = kradaudio;

	/* NOTE(review): these malloc() results are not checked; the sizes are
	 * magic constants -- confirm against the buffer users. */
	kradpulse->samples[0] = malloc(24 * 8192);
	kradpulse->samples[1] = malloc(24 * 8192);
	kradpulse->interleaved_samples = malloc(48 * 8192);
	kradpulse->capture_samples[0] = malloc(24 * 8192);
	kradpulse->capture_samples[1] = malloc(24 * 8192);
	kradpulse->capture_interleaved_samples = malloc(48 * 8192);

	kradpulse->latency = 20000; // start latency in micro seconds
	kradpulse->underflows = 0;
	kradpulse->pa_ready = 0;
	kradpulse->retval = 0;

	// Create a mainloop API and connection to the default server
	kradpulse->pa_ml = pa_mainloop_new();
	kradpulse->pa_mlapi = pa_mainloop_get_api(kradpulse->pa_ml);
	kradpulse->pa_ctx = pa_context_new(kradpulse->pa_mlapi, kradpulse->kradaudio->name);
	pa_context_connect(kradpulse->pa_ctx, NULL, 0, NULL);

	// This function defines a callback so the server will tell us it's state.
	// Our callback will wait for the state to be ready. The callback will
	// modify the variable to 1 so we know when we have a connection and it's
	// ready.
	// If there's an error, the callback will set pa_ready to 2
	pa_context_set_state_callback(kradpulse->pa_ctx, kradpulse_state_cb, kradpulse);

	// We can't do anything until PA is ready, so just iterate the mainloop
	// and continue
	while (kradpulse->pa_ready == 0) {
		pa_mainloop_iterate(kradpulse->pa_ml, 1, NULL);
	}

	if (kradpulse->pa_ready == 2) {
		kradpulse->retval = -1;
		printf("pulseaudio fail\n");
		exit(1);
	}

	/* Shared sample spec and buffer attributes for both directions. */
	kradpulse->ss.rate = 44100;
	kradpulse->ss.channels = 2;
	kradpulse->ss.format = PA_SAMPLE_FLOAT32LE;
	kradpulse->bufattr.fragsize = (uint32_t)-1;
	kradpulse->bufattr.maxlength = pa_usec_to_bytes(kradpulse->latency, &kradpulse->ss);
	kradpulse->bufattr.minreq = pa_usec_to_bytes(0, &kradpulse->ss);
	kradpulse->bufattr.prebuf = (uint32_t)-1;
	kradpulse->bufattr.tlength = pa_usec_to_bytes(kradpulse->latency, &kradpulse->ss);

	if ((kradaudio->direction == KOUTPUT) || (kradaudio->direction == KDUPLEX)) {

		kradpulse->playstream = pa_stream_new(kradpulse->pa_ctx, "Playback", &kradpulse->ss, NULL);

		if (!kradpulse->playstream) {
			printf("playback pa_stream_new failed\n");
			exit(1);
		}

		pa_stream_set_write_callback(kradpulse->playstream, kradpulse_playback_cb, kradpulse);
		pa_stream_set_underflow_callback(kradpulse->playstream, kradpulse_stream_underflow_cb, kradpulse);

		kradpulse->r = pa_stream_connect_playback(kradpulse->playstream, NULL, &kradpulse->bufattr,
		                                          PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY |
		                                          PA_STREAM_AUTO_TIMING_UPDATE, NULL, NULL);

		if (kradpulse->r < 0) {
			printf("pa_stream_connect_playback failed\n");
			kradpulse->retval = -1;
			printf("pulseaudio fail\n");
			exit(1);
		}
	}

	if ((kradaudio->direction == KINPUT) || (kradaudio->direction == KDUPLEX)) {

		kradpulse->capturestream = pa_stream_new(kradpulse->pa_ctx, "Capture", &kradpulse->ss, NULL);

		if (!kradpulse->capturestream) {
			printf("capture pa_stream_new failed\n");
			exit(1);
		}

		pa_stream_set_read_callback(kradpulse->capturestream, kradpulse_capture_cb, kradpulse);
		pa_stream_set_underflow_callback(kradpulse->capturestream, kradpulse_stream_underflow_cb, kradpulse);

		kradpulse->r = pa_stream_connect_record(kradpulse->capturestream, NULL, &kradpulse->bufattr,
		                                        PA_STREAM_NOFLAGS );

		if (kradpulse->r < 0) {
			printf("pa_stream_connect_capture failed\n");
			kradpulse->retval = -1;
			printf("pulseaudio fail\n");
			exit(1);
		}
	}

	kradaudio->sample_rate = kradpulse->ss.rate;

	/* Run the PulseAudio mainloop on its own thread. */
	pthread_create( &kradpulse->loop_thread, NULL, kradpulse_loop_thread, kradpulse);

	return kradpulse;
}
/*
 * pa_context_load_module() completion callback for an internal audio sink.
 * idx == (uint32_t)-1 signals failure; otherwise records the module index
 * and opens a monitor record stream ("<identifier>.monitor") at S16LE
 * stereo 48 kHz so the sink's audio can be captured.
 */
void AudioSinksManager::InternalAudioSink::module_load_callback(pa_context* /*c*/, uint32_t idx,
                                                                void* userdata) {
    AudioSinksManager::InternalAudioSink* sink =
            static_cast<AudioSinksManager::InternalAudioSink*>(userdata);
    if (idx == static_cast<uint32_t>(-1)) {
        sink->manager->logger->error("(AudioSink '{}') Failed to load module {}: {}", sink->name,
                                     idx, sink->manager->get_pa_error());
        sink->state = State::DEAD;
        sink->manager->unregister_audio_sink(sink->shared_from_this());
        return;
    }
    sink->module_idx = idx;
    sink->manager->logger->debug("(AudioSink '{}') Loaded module idx: {}, name: {}", sink->name,
                                 sink->module_idx, sink->identifier);
    /* The sink may have been asked to die while the load was in flight. */
    if (sink->state == State::DEAD) {
        sink->stop_sink();
        return;
    }
    sink->state = State::LOADED;

    // Start record stream
    pa_sample_spec sample_spec;
    sample_spec.format = PA_SAMPLE_S16LE;
    sample_spec.channels = 2;
    sample_spec.rate = 48000;
    std::string stream_name = sink->identifier + "_record_stream";
    sink->stream = pa_stream_new(sink->manager->context, stream_name.c_str(), &sample_spec, NULL);
    if (!sink->stream) {
        sink->manager->logger->error("(AudioSink '{}') Failed to create stream: {}", sink->name,
                                     sink->manager->get_pa_error());
        sink->free();
        return;
    }
    pa_stream_set_state_callback(sink->stream, stream_state_change_callback, sink);
    pa_stream_set_read_callback(sink->stream, stream_read_callback, sink);

    std::string device_name = sink->identifier + ".monitor";
    pa_buffer_attr buffer_attr;
    // TODO: make buffer size configurable
    buffer_attr.fragsize = (sample_spec.rate * sizeof(AudioSample)) / (1000 / 20);  // 20ms of buffer
    buffer_attr.maxlength = static_cast<uint32_t>(-1);
    buffer_attr.minreq = buffer_attr.prebuf = buffer_attr.tlength = static_cast<uint32_t>(-1);

    // playback only arguments
    // NOTE(review): PA_STREAM_START_UNMUTED (and INTERPOLATE_TIMING) are
    // playback-oriented flags passed to a record stream -- confirm the
    // server accepts this combination.
    pa_stream_flags_t stream_flags = static_cast<pa_stream_flags_t>(
            PA_STREAM_DONT_MOVE | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_INTERPOLATE_TIMING |
            PA_STREAM_START_UNMUTED | PA_STREAM_ADJUST_LATENCY);
    if (pa_stream_connect_record(sink->stream, device_name.c_str(), &buffer_attr, stream_flags) <
        0) {
        pa_stream_unref(sink->stream);
        sink->stream = nullptr;
        sink->manager->logger->error("(AudioSink '{}') Failed to connect to stream: {}",
                                     sink->name, sink->manager->get_pa_error());
        sink->free();
        return;
    }
    sink->state = State::RECORDING;
    sink->update_sink_info();
}