/***************************************************************************** * Open: open the audio device *****************************************************************************/ static int Open ( vlc_object_t *p_this ) { aout_instance_t *p_aout = (aout_instance_t *)p_this; struct aout_sys_t * p_sys; struct pa_sample_spec ss; const struct pa_buffer_attr *buffer_attr; struct pa_buffer_attr a; struct pa_channel_map map; /* Allocate structures */ p_aout->output.p_sys = p_sys = calloc( 1, sizeof( aout_sys_t ) ); if( p_sys == NULL ) return VLC_ENOMEM; PULSE_DEBUG( "Pulse start initialization"); ss.channels = aout_FormatNbChannels( &p_aout->output.output ); /* Get the input stream channel count */ /* Setup the pulse audio stream based on the input stream count */ switch(ss.channels) { case 8: p_aout->output.output.i_physical_channels = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT | AOUT_CHAN_CENTER | AOUT_CHAN_MIDDLELEFT | AOUT_CHAN_MIDDLERIGHT | AOUT_CHAN_REARLEFT | AOUT_CHAN_REARRIGHT | AOUT_CHAN_LFE; break; case 6: p_aout->output.output.i_physical_channels = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT | AOUT_CHAN_CENTER | AOUT_CHAN_REARLEFT | AOUT_CHAN_REARRIGHT | AOUT_CHAN_LFE; break; case 4: p_aout->output.output.i_physical_channels = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT | AOUT_CHAN_REARLEFT | AOUT_CHAN_REARRIGHT; break; case 2: p_aout->output.output.i_physical_channels = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT; break; case 1: p_aout->output.output.i_physical_channels = AOUT_CHAN_CENTER; break; default: msg_Err(p_aout,"Invalid number of channels"); goto fail; } /* Add a quick command line info message */ msg_Dbg(p_aout, "%d audio channels", ss.channels); ss.rate = p_aout->output.output.i_rate; if (HAVE_FPU) { ss.format = PA_SAMPLE_FLOAT32NE; p_aout->output.output.i_format = VLC_CODEC_FL32; } else { ss.format = PA_SAMPLE_S16NE; p_aout->output.output.i_format = VLC_CODEC_S16N; } if (!pa_sample_spec_valid(&ss)) { msg_Err(p_aout,"Invalid sample spec"); goto fail; } /* Reduce overall latency to 200mS to reduce audible clicks * Also pulse minreq and internal buffers are now 20mS which reduces resampling */ a.tlength = pa_bytes_per_second(&ss)/5; a.maxlength = a.tlength * 2; a.prebuf = a.tlength / 2; a.minreq = a.tlength / 10; /* Buffer size is 20mS */ p_sys->buffer_size = a.minreq; /* Initialise the speaker map setup above */ pa_channel_map_init_auto(&map, ss.channels, PA_CHANNEL_MAP_ALSA); if (!(p_sys->mainloop = pa_threaded_mainloop_new())) { msg_Err(p_aout, "Failed to allocate main loop"); goto fail; } if (!(p_sys->context = pa_context_new(pa_threaded_mainloop_get_api(p_sys->mainloop), _( PULSE_CLIENT_NAME )))) { msg_Err(p_aout, "Failed to allocate context"); goto fail; } pa_context_set_state_callback(p_sys->context, context_state_cb, p_aout); PULSE_DEBUG( "Pulse before context connect"); if (pa_context_connect(p_sys->context, NULL, 0, NULL) < 0) { msg_Err(p_aout, "Failed to connect to server: %s", pa_strerror(pa_context_errno(p_sys->context))); goto fail; } PULSE_DEBUG( "Pulse after context connect"); pa_threaded_mainloop_lock(p_sys->mainloop); if (pa_threaded_mainloop_start(p_sys->mainloop) < 0) { msg_Err(p_aout, "Failed to start main loop"); goto unlock_and_fail; } msg_Dbg(p_aout, "Pulse mainloop started"); /* Wait until the context is ready */ pa_threaded_mainloop_wait(p_sys->mainloop); if (pa_context_get_state(p_sys->context) != PA_CONTEXT_READY) { msg_Dbg(p_aout, "Failed to connect to server: %s", pa_strerror(pa_context_errno(p_sys->context))); goto unlock_and_fail; } if (!(p_sys->stream = 
pa_stream_new(p_sys->context, "audio stream", &ss, &map))) { msg_Err(p_aout, "Failed to create stream: %s", pa_strerror(pa_context_errno(p_sys->context))); goto unlock_and_fail; } PULSE_DEBUG( "Pulse after new stream"); pa_stream_set_state_callback(p_sys->stream, stream_state_cb, p_aout); pa_stream_set_write_callback(p_sys->stream, stream_request_cb, p_aout); pa_stream_set_latency_update_callback(p_sys->stream, stream_latency_update_cb, p_aout); if (pa_stream_connect_playback(p_sys->stream, NULL, &a, PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_AUTO_TIMING_UPDATE|PA_STREAM_ADJUST_LATENCY, NULL, NULL) < 0) { msg_Err(p_aout, "Failed to connect stream: %s", pa_strerror(pa_context_errno(p_sys->context))); goto unlock_and_fail; } PULSE_DEBUG("Pulse stream connect"); /* Wait until the stream is ready */ pa_threaded_mainloop_wait(p_sys->mainloop); msg_Dbg(p_aout,"Pulse stream connected"); if (pa_stream_get_state(p_sys->stream) != PA_STREAM_READY) { msg_Err(p_aout, "Failed to connect to server: %s", pa_strerror(pa_context_errno(p_sys->context))); goto unlock_and_fail; } PULSE_DEBUG("Pulse after stream get status"); pa_threaded_mainloop_unlock(p_sys->mainloop); buffer_attr = pa_stream_get_buffer_attr(p_sys->stream); p_aout->output.i_nb_samples = buffer_attr->minreq / pa_frame_size(&ss); p_aout->output.pf_play = Play; aout_VolumeSoftInit(p_aout); msg_Dbg(p_aout, "Pulse initialized successfully"); { char cmt[PA_CHANNEL_MAP_SNPRINT_MAX], sst[PA_SAMPLE_SPEC_SNPRINT_MAX]; msg_Dbg(p_aout, "Buffer metrics: maxlength=%u, tlength=%u, prebuf=%u, minreq=%u", buffer_attr->maxlength, buffer_attr->tlength, buffer_attr->prebuf, buffer_attr->minreq); msg_Dbg(p_aout, "Using sample spec '%s', channel map '%s'.", pa_sample_spec_snprint(sst, sizeof(sst), pa_stream_get_sample_spec(p_sys->stream)), pa_channel_map_snprint(cmt, sizeof(cmt), pa_stream_get_channel_map(p_sys->stream))); msg_Dbg(p_aout, "Connected to device %s (%u, %ssuspended).", pa_stream_get_device_name(p_sys->stream), pa_stream_get_device_index(p_sys->stream), pa_stream_is_suspended(p_sys->stream) ? "" : "not "); } return VLC_SUCCESS; unlock_and_fail: msg_Dbg(p_aout, "Pulse initialization unlock and fail"); if (p_sys->mainloop) pa_threaded_mainloop_unlock(p_sys->mainloop); fail: msg_Dbg(p_aout, "Pulse initialization failed"); uninit(p_aout); return VLC_EGENERIC; }
bool CAESinkPULSE::Initialize(AEAudioFormat &format, std::string &device) { { CSingleLock lock(m_sec); m_IsAllocated = false; } m_passthrough = false; m_BytesPerSecond = 0; m_BufferSize = 0; m_Channels = 0; m_Stream = NULL; m_Context = NULL; m_periodSize = 0; if (!SetupContext(NULL, &m_Context, &m_MainLoop)) { CLog::Log(LOGNOTICE, "PulseAudio might not be running. Context was not created."); Deinitialize(); return false; } pa_threaded_mainloop_lock(m_MainLoop); struct pa_channel_map map; pa_channel_map_init(&map); // PULSE cannot cope with e.g. planar formats so we fallback to FLOAT // when we receive an invalid pulse format if (AEFormatToPulseFormat(format.m_dataFormat) == PA_SAMPLE_INVALID) { CLog::Log(LOGDEBUG, "PULSE does not support format: %s - will fallback to AE_FMT_FLOAT", CAEUtil::DataFormatToStr(format.m_dataFormat)); format.m_dataFormat = AE_FMT_FLOAT; } m_passthrough = AE_IS_RAW(format.m_dataFormat); if(m_passthrough) { map.channels = 2; format.m_channelLayout = AE_CH_LAYOUT_2_0; } else { map = AEChannelMapToPAChannel(format.m_channelLayout); // if count has changed we need to fit the AE Map if(map.channels != format.m_channelLayout.Count()) format.m_channelLayout = PAChannelToAEChannelMap(map); } m_Channels = format.m_channelLayout.Count(); // store information about current sink SinkInfoStruct sinkStruct; sinkStruct.mainloop = m_MainLoop; sinkStruct.device_found = false; // get real sample rate of the device we want to open - to avoid resampling bool isDefaultDevice = (device == "Default"); WaitForOperation(pa_context_get_sink_info_by_name(m_Context, isDefaultDevice ? NULL : device.c_str(), SinkInfoCallback, &sinkStruct), m_MainLoop, "Get Sink Info"); // only check if the device is existing - don't alter the sample rate if (!sinkStruct.device_found) { CLog::Log(LOGERROR, "PulseAudio: Sink %s not found", device.c_str()); pa_threaded_mainloop_unlock(m_MainLoop); Deinitialize(); return false; } // Pulse can resample everything between 1 hz and 192000 hz // Make sure we are in the range that we originally added format.m_sampleRate = std::max(5512U, std::min(format.m_sampleRate, 192000U)); pa_format_info *info[1]; info[0] = pa_format_info_new(); info[0]->encoding = AEFormatToPulseEncoding(format.m_dataFormat); if(!m_passthrough) { pa_format_info_set_sample_format(info[0], AEFormatToPulseFormat(format.m_dataFormat)); pa_format_info_set_channel_map(info[0], &map); } pa_format_info_set_channels(info[0], m_Channels); // PA requires m_encodedRate in order to do EAC3 unsigned int samplerate = format.m_sampleRate; if (m_passthrough && (AEFormatToPulseEncoding(format.m_dataFormat) == PA_ENCODING_EAC3_IEC61937)) { // this is only used internally for PA to use EAC3 samplerate = format.m_encodedRate; } pa_format_info_set_rate(info[0], samplerate); if (!pa_format_info_valid(info[0])) { CLog::Log(LOGERROR, "PulseAudio: Invalid format info"); pa_format_info_free(info[0]); pa_threaded_mainloop_unlock(m_MainLoop); Deinitialize(); return false; } pa_sample_spec spec; #if PA_CHECK_VERSION(2,0,0) pa_format_info_to_sample_spec(info[0], &spec, NULL); #else spec.rate = (AEFormatToPulseEncoding(format.m_dataFormat) == PA_ENCODING_EAC3_IEC61937) ? 
4 * samplerate : samplerate; spec.format = AEFormatToPulseFormat(format.m_dataFormat); spec.channels = m_Channels; #endif if (!pa_sample_spec_valid(&spec)) { CLog::Log(LOGERROR, "PulseAudio: Invalid sample spec"); pa_format_info_free(info[0]); pa_threaded_mainloop_unlock(m_MainLoop); Deinitialize(); return false; } m_BytesPerSecond = pa_bytes_per_second(&spec); unsigned int frameSize = pa_frame_size(&spec); m_Stream = pa_stream_new_extended(m_Context, "kodi audio stream", info, 1, NULL); pa_format_info_free(info[0]); if (m_Stream == NULL) { CLog::Log(LOGERROR, "PulseAudio: Could not create a stream"); pa_threaded_mainloop_unlock(m_MainLoop); Deinitialize(); return false; } pa_stream_set_state_callback(m_Stream, StreamStateCallback, m_MainLoop); pa_stream_set_write_callback(m_Stream, StreamRequestCallback, m_MainLoop); pa_stream_set_latency_update_callback(m_Stream, StreamLatencyUpdateCallback, m_MainLoop); // default buffer construction // align with AE's max buffer unsigned int latency = m_BytesPerSecond / 2.5; // 400 ms unsigned int process_time = latency / 4; // 100 ms if(sinkStruct.isHWDevice) { // on hw devices buffers can be further reduced // 200ms max latency // 50ms min packet size latency = m_BytesPerSecond / 5; process_time = latency / 4; } pa_buffer_attr buffer_attr; buffer_attr.fragsize = latency; buffer_attr.maxlength = (uint32_t) -1; buffer_attr.minreq = process_time; buffer_attr.prebuf = (uint32_t) -1; buffer_attr.tlength = latency; if (pa_stream_connect_playback(m_Stream, isDefaultDevice ? NULL : device.c_str(), &buffer_attr, ((pa_stream_flags)(PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_ADJUST_LATENCY)), NULL, NULL) < 0) { CLog::Log(LOGERROR, "PulseAudio: Failed to connect stream to output"); pa_threaded_mainloop_unlock(m_MainLoop); Deinitialize(); return false; } /* Wait until the stream is ready */ do { pa_threaded_mainloop_wait(m_MainLoop); CLog::Log(LOGDEBUG, "PulseAudio: Stream %s", StreamStateToString(pa_stream_get_state(m_Stream))); } while (pa_stream_get_state(m_Stream) != PA_STREAM_READY && pa_stream_get_state(m_Stream) != PA_STREAM_FAILED); if (pa_stream_get_state(m_Stream) == PA_STREAM_FAILED) { CLog::Log(LOGERROR, "PulseAudio: Waited for the stream but it failed"); pa_threaded_mainloop_unlock(m_MainLoop); Deinitialize(); return false; } const pa_buffer_attr *a; if (!(a = pa_stream_get_buffer_attr(m_Stream))) { CLog::Log(LOGERROR, "PulseAudio: %s", pa_strerror(pa_context_errno(m_Context))); pa_threaded_mainloop_unlock(m_MainLoop); Deinitialize(); return false; } else { unsigned int packetSize = a->minreq; m_BufferSize = a->tlength; m_periodSize = a->minreq; format.m_frames = packetSize / frameSize; } { CSingleLock lock(m_sec); // Register Callback for Sink changes pa_context_set_subscribe_callback(m_Context, SinkChangedCallback, this); const pa_subscription_mask_t mask = PA_SUBSCRIPTION_MASK_SINK; pa_operation *op = pa_context_subscribe(m_Context, mask, NULL, this); if (op != NULL) pa_operation_unref(op); // Register Callback for Sink Info changes - this handles volume pa_context_set_subscribe_callback(m_Context, SinkInputInfoChangedCallback, this); const pa_subscription_mask_t mask_input = PA_SUBSCRIPTION_MASK_SINK_INPUT; pa_operation* op_sinfo = pa_context_subscribe(m_Context, mask_input, NULL, this); if (op_sinfo != NULL) pa_operation_unref(op_sinfo); } pa_threaded_mainloop_unlock(m_MainLoop); format.m_frameSize = frameSize; format.m_frameSamples = format.m_frames * format.m_channelLayout.Count(); m_format = format; 
format.m_dataFormat = m_passthrough ? AE_FMT_S16NE : format.m_dataFormat; CLog::Log(LOGNOTICE, "PulseAudio: Opened device %s in %s mode with Buffersize %u ms", device.c_str(), m_passthrough ? "passthrough" : "pcm", (unsigned int) ((m_BufferSize / (float) m_BytesPerSecond) * 1000)); // Cork stream will resume when adding first package Pause(true); { CSingleLock lock(m_sec); m_IsAllocated = true; } return true; }
int main(int argc, char **argv)
{
    pa_proplist *proplist = NULL;
    pa_mainloop *m = NULL;
    pa_mainloop_api *api = NULL;
    pa_context *context = NULL;
    struct context *ctx;
    struct audio_file *file;
    int ret;

    ctx = malloc(sizeof(struct context));
    if (!ctx) {
        errorp("Couldn't allocate async callbacks context");
        goto quit;
    }
    memset(ctx, 0, sizeof(*ctx));

    file = audio_file_new("samples/sample.wav");
    if (!file)
        goto quit;

    proplist = pa_proplist_new();
    if (!proplist) {
        error("Couldn't create a PulseAudio property list");
        goto quit;
    }
    pa_proplist_sets(proplist, PA_PROP_APPLICATION_NAME, "malicious-client-kill-server");

    m = pa_mainloop_new();
    if (!m) {
        error("Couldn't create PulseAudio mainloop");
        goto quit;
    }
    api = pa_mainloop_get_api(m);

    context = pa_context_new_with_proplist(api, NULL, proplist);
    if (!context) {
        error("Couldn't create client context");
        goto quit;
    }

    ctx->file = file;
    ctx->mainloop_api = api;
    ctx->context = context;

    pa_context_set_state_callback(context, context_state_callback, ctx);

    ret = pa_context_connect(context, NULL, 0, NULL);
    if (ret < 0) {
        error("Couldn't connect to PulseAudio server: %s",
              pa_strerror(pa_context_errno(context)));
        goto quit;
    }

    pa_mainloop_run(m, &ret);
    return ret;

quit:
    exit(EXIT_FAILURE);
}
static int paStreamOpen(PDRVHOSTPULSEAUDIO pThis, bool fIn, const char *pszName, pa_sample_spec *pSampleSpec, pa_buffer_attr *pBufAttr, pa_stream **ppStream) { AssertPtrReturn(pThis, VERR_INVALID_POINTER); AssertPtrReturn(pszName, VERR_INVALID_POINTER); AssertPtrReturn(pSampleSpec, VERR_INVALID_POINTER); AssertPtrReturn(pBufAttr, VERR_INVALID_POINTER); AssertPtrReturn(ppStream, VERR_INVALID_POINTER); if (!pa_sample_spec_valid(pSampleSpec)) { LogRel(("PulseAudio: Unsupported sample specification for stream \"%s\"\n", pszName)); return VERR_NOT_SUPPORTED; } int rc = VINF_SUCCESS; pa_stream *pStream = NULL; uint32_t flags = PA_STREAM_NOFLAGS; LogFunc(("Opening \"%s\", rate=%dHz, channels=%d, format=%s\n", pszName, pSampleSpec->rate, pSampleSpec->channels, pa_sample_format_to_string(pSampleSpec->format))); pa_threaded_mainloop_lock(pThis->pMainLoop); do { /** @todo r=andy Use pa_stream_new_with_proplist instead. */ if (!(pStream = pa_stream_new(pThis->pContext, pszName, pSampleSpec, NULL /* pa_channel_map */))) { LogRel(("PulseAudio: Could not create stream \"%s\"\n", pszName)); rc = VERR_NO_MEMORY; break; } pa_stream_set_state_callback(pStream, paStreamCbStateChanged, pThis); #if PA_API_VERSION >= 12 /* XXX */ flags |= PA_STREAM_ADJUST_LATENCY; #endif #if 0 /* Not applicable as we don't use pa_stream_get_latency() and pa_stream_get_time(). */ flags |= PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE; #endif /* No input/output right away after the stream was started. */ flags |= PA_STREAM_START_CORKED; if (fIn) { LogFunc(("Input stream attributes: maxlength=%d fragsize=%d\n", pBufAttr->maxlength, pBufAttr->fragsize)); if (pa_stream_connect_record(pStream, /*dev=*/NULL, pBufAttr, (pa_stream_flags_t)flags) < 0) { LogRel(("PulseAudio: Could not connect input stream \"%s\": %s\n", pszName, pa_strerror(pa_context_errno(pThis->pContext)))); rc = VERR_AUDIO_BACKEND_INIT_FAILED; break; } } else { LogFunc(("Output buffer attributes: maxlength=%d tlength=%d prebuf=%d minreq=%d\n", pBufAttr->maxlength, pBufAttr->tlength, pBufAttr->prebuf, pBufAttr->minreq)); if (pa_stream_connect_playback(pStream, /*dev=*/NULL, pBufAttr, (pa_stream_flags_t)flags, /*cvolume=*/NULL, /*sync_stream=*/NULL) < 0) { LogRel(("PulseAudio: Could not connect playback stream \"%s\": %s\n", pszName, pa_strerror(pa_context_errno(pThis->pContext)))); rc = VERR_AUDIO_BACKEND_INIT_FAILED; break; } } /* Wait until the stream is ready. 
*/ for (;;) { if (!pThis->fLoopWait) pa_threaded_mainloop_wait(pThis->pMainLoop); pThis->fLoopWait = false; pa_stream_state_t streamSt = pa_stream_get_state(pStream); if (streamSt == PA_STREAM_READY) break; else if ( streamSt == PA_STREAM_FAILED || streamSt == PA_STREAM_TERMINATED) { LogRel(("PulseAudio: Failed to initialize stream \"%s\" (state %ld)\n", pszName, streamSt)); rc = VERR_AUDIO_BACKEND_INIT_FAILED; break; } } if (RT_FAILURE(rc)) break; const pa_buffer_attr *pBufAttrObtained = pa_stream_get_buffer_attr(pStream); AssertPtr(pBufAttrObtained); memcpy(pBufAttr, pBufAttrObtained, sizeof(pa_buffer_attr)); if (fIn) LogFunc(("Obtained record buffer attributes: maxlength=%RU32, fragsize=%RU32\n", pBufAttr->maxlength, pBufAttr->fragsize)); else LogFunc(("Obtained playback buffer attributes: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d\n", pBufAttr->maxlength, pBufAttr->tlength, pBufAttr->prebuf, pBufAttr->minreq)); } while (0); if ( RT_FAILURE(rc) && pStream) pa_stream_disconnect(pStream); pa_threaded_mainloop_unlock(pThis->pMainLoop); if (RT_FAILURE(rc)) { if (pStream) pa_stream_unref(pStream); } else *ppStream = pStream; LogFlowFuncLeaveRC(rc); return rc; }
void * record(void *args)
{
    int error;
    pa_sample_spec ss;
    char ss_a[PA_SAMPLE_SPEC_SNPRINT_MAX];

    memset(pa_buff, 0, ABUFF_SIZE);

    ss.format = PA_SAMPLE_FLOAT32LE;
    ss.channels = CHANNELS;
    ss.rate = frequency;

    pa_s = pa_simple_new(NULL,              /* PulseAudio server. */
                         "Recorder",        /* Application's name. */
                         PA_STREAM_RECORD,  /* Stream direction. */
                         NULL,              /* Source device (NULL for default). */
                         "PulseAudio-read", /* Stream description. */
                         &ss,               /* Sample format. */
                         NULL,              /* Channel map */
                         NULL,              /* Buffering attributes. */
                         &error             /* Error code. */
                         );
    if (NULL == pa_s) {
        fprintf(stderr, __FILE__": pa_simple_new() failed: %s\n",
                pa_strerror(error));
        exit(1);
    }

    pa_sample_spec_snprint(ss_a, sizeof(ss_a), &ss);
    D("Opening the recording stream with sample specification '%s'", ss_a);
    D("%s", "Start recording");

    while (recording) {
        int n;
        int error;

        n = pa_simple_read(pa_s, (void *)pa_buff, ABUFF_SIZE, &error);
        if (-1 != n) {
            int i;
            ei_x_buff result;

            /* Prepare the output buffer that will hold the result */
            check(ei_x_new_with_version(&result));
            /* List size */
            check(ei_x_encode_list_header(&result, INSIZE));
            /* List elements */
            for (i = 0; i < NSAMPLES; i++)
                check(ei_x_encode_double(&result, pa_buff[i]));
            /* Make a proper list */
            check(ei_x_encode_empty_list(&result));

            // D("%s", "Sending data");
            write_cmd(&result);
            ei_x_free(&result);
        }
    }

    pa_simple_free(pa_s);
    pthread_exit(NULL);
}
void *pa_fft_thread(void *arg) { struct pa_fft *t = (struct pa_fft *)arg; float weights[t->buffer_samples]; graph_init(t); avg_buf_init(t); weights_init(weights, t->fft_memb, t->win_type); while (t->cont) { while(SDL_PollEvent(&t->event)) { switch(t->event.type) { case SDL_QUIT: t->cont = 0; break; case SDL_WINDOWEVENT: SDL_GL_GetDrawableSize(t->win, &t->width, &t->height); glViewport(0, 0, t->width, t->height); break; default: break; } } if (t->overlap) memcpy(&t->pa_buf[0], &t->pa_buf[t->pa_samples], t->pa_samples*sizeof(float)); pa_usec_t lag = pa_simple_get_latency(t->s, &t->error); if (pa_simple_read(t->s, &t->pa_buf[t->overlap ? t->pa_samples : 0], t->pa_buf_size, &t->error) < 0) { fprintf(stderr, __FILE__": pa_simple_read() failed: %s\n", pa_strerror(t->error)); t->cont = 0; continue; } apply_win(t->buffer, t->pa_buf, weights, t->buffer_samples); fftw_execute(t->plan); double freq_low, freq_disp, freq_range, freq_off, mag_max = 0.0f; if (t->log_graph) { freq_low = log10((t->start_low*t->fft_fund_freq)/((float)t->ss.rate/2)); freq_disp = 1.0 - log10((t->fft_memb*t->fft_fund_freq)/((float)t->ss.rate/2)); freq_range = (1.0 - freq_disp) - freq_low; freq_off = 0.0f; } else { freq_low = (t->start_low*t->fft_fund_freq)/((float)t->ss.rate/2); freq_disp = 1.0 - (t->fft_memb*t->fft_fund_freq)/((float)t->ss.rate/2); freq_range = (1.0 - freq_disp) - freq_low; freq_off = 1.0f; } for (int i = t->start_low; i < t->fft_memb; i++) { fftw_complex num = t->output[i]; double mag = creal(num)*creal(num) + cimag(num)*cimag(num); mag = log10(mag)/10; mag = frame_average(mag, t->frame_avg_mag[i], t->frame_avg, 1); mag_max = mag > mag_max ? mag : mag_max; } if (!t->no_refresh) glClear(GL_COLOR_BUFFER_BIT); glBegin(GL_LINE_STRIP); if ((float)lag/1000000 < 1.0f) glColor3f(255.0,255.0,255.0); else glColor3f(255.0,0.0,0.0); for (int i = t->start_low; i < t->fft_memb; i++) { double freq; fftw_complex num = t->output[i]; if (t->log_graph) freq = log10((i*t->fft_fund_freq)/((float)t->ss.rate/2)); else freq = (i*t->fft_fund_freq)/((float)t->ss.rate/2); double mag = creal(num)*creal(num) + cimag(num)*cimag(num); mag = log10(mag)/10; mag = frame_average(mag, t->frame_avg_mag[i], t->frame_avg, 0); glVertex2f((freq/freq_range + freq_disp/2)*2 - freq_off, mag + mag_max + 0.5f); } glEnd(); SDL_GL_SwapWindow(t->win); } SDL_DestroyWindow(t->win); SDL_Quit(); deinit_fft(t); return NULL; }
bool AudioOutputPulseAudio::ConnectPlaybackStream(void) { QString fn_log_tag = "ConnectPlaybackStream, "; pstream = pa_stream_new(pcontext, "MythTV playback", &sample_spec, &channel_map); if (!pstream) { VBERROR(fn_log_tag + QString("failed to create new playback stream")); return false; } pa_stream_set_state_callback(pstream, StreamStateCallback, this); pa_stream_set_write_callback(pstream, WriteCallback, this); pa_stream_set_overflow_callback(pstream, BufferFlowCallback, (char*)"over"); pa_stream_set_underflow_callback(pstream, BufferFlowCallback, (char*)"under"); if (set_initial_vol) { int volume = gCoreContext->GetNumSetting("MasterMixerVolume", 80); pa_cvolume_set(&volume_control, channels, (float)volume * (float)PA_VOLUME_NORM / 100.0f); } else pa_cvolume_reset(&volume_control, channels); fragment_size = (samplerate * 25 * output_bytes_per_frame) / 1000; buffer_settings.maxlength = (uint32_t)-1; buffer_settings.tlength = fragment_size * 4; buffer_settings.prebuf = (uint32_t)-1; buffer_settings.minreq = (uint32_t)-1; int flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_NO_REMIX_CHANNELS; pa_stream_connect_playback(pstream, NULL, &buffer_settings, (pa_stream_flags_t)flags, &volume_control, NULL); pa_context_state_t cstate; pa_stream_state_t sstate; bool connected = false, failed = false; while (!(connected || failed)) { switch (cstate = pa_context_get_state(pcontext)) { case PA_CONTEXT_FAILED: case PA_CONTEXT_TERMINATED: VERBOSE(VB_IMPORTANT, LOC_ERR + fn_log_tag + QString("context is stuffed, %1") .arg(pa_strerror(pa_context_errno( pcontext)))); failed = true; break; default: switch (sstate = pa_stream_get_state(pstream)) { case PA_STREAM_READY: connected = true; break; case PA_STREAM_FAILED: case PA_STREAM_TERMINATED: VBERROR(fn_log_tag + QString("stream failed or was terminated, " "context state %1, stream state %2") .arg(cstate).arg(sstate)); failed = true; break; default: pa_threaded_mainloop_wait(mainloop); break; } } } const pa_buffer_attr *buf_attr = pa_stream_get_buffer_attr(pstream); fragment_size = buf_attr->tlength >> 2; soundcard_buffer_size = buf_attr->maxlength; VBAUDIO(fn_log_tag + QString("fragment size %1, soundcard buffer size %2") .arg(fragment_size).arg(soundcard_buffer_size)); return (connected && !failed); }
/** libao initialization function, arguments are sampling frequency, * number of channels, sample type and some flags */ static int init(int rate_hz, int channels, int format, int flags) { struct pa_sample_spec ss; struct pa_buffer_attr a; char hn[128]; char *host = NULL; assert(!context && !stream && !mainloop); if (ao_subdevice) { int i = strcspn(ao_subdevice, ":"); if (i >= sizeof(hn)) i = sizeof(hn)-1; if (i > 0) { strncpy(host = hn, ao_subdevice, i); hn[i] = 0; } if (ao_subdevice[i] == ':') sink = ao_subdevice+i+1; } mp_msg(MSGT_AO, MSGL_ERR, "AO: [polyp] -%s-%s-\n", host, sink); ss.channels = channels; ss.rate = rate_hz; switch (format) { case AF_FORMAT_U8: ss.format = PA_SAMPLE_U8; break; case AF_FORMAT_S16_LE: ss.format = PA_SAMPLE_S16LE; break; case AF_FORMAT_S16_BE: ss.format = PA_SAMPLE_S16BE; break; case AF_FORMAT_FLOAT_NE: ss.format = PA_SAMPLE_FLOAT32; break; default: mp_msg(MSGT_AO, MSGL_ERR, "AO: [polyp] Unsupported sample spec\n"); goto fail; } if (!pa_sample_spec_valid(&ss)) { mp_msg(MSGT_AO, MSGL_ERR, "AO: [polyp] Invalid sample spec\n"); goto fail; } mainloop = pa_mainloop_new(); assert(mainloop); context = pa_context_new(pa_mainloop_get_api(mainloop), POLYP_CLIENT_NAME); assert(context); pa_context_connect(context, host, 1, NULL); wait_for_completion(); if (pa_context_get_state(context) != PA_CONTEXT_READY) { mp_msg(MSGT_AO, MSGL_ERR, "AO: [polyp] Failed to connect to server: %s\n", pa_strerror(pa_context_errno(context))); goto fail; } stream = pa_stream_new(context, "audio stream", &ss); assert(stream); a.maxlength = pa_bytes_per_second(&ss)*1; a.tlength = a.maxlength*9/10; a.prebuf = a.tlength/2; a.minreq = a.tlength/10; pa_stream_connect_playback(stream, sink, &a, PA_STREAM_INTERPOLATE_LATENCY, PA_VOLUME_NORM); wait_for_completion(); if (pa_stream_get_state(stream) != PA_STREAM_READY) { mp_msg(MSGT_AO, MSGL_ERR, "AO: [polyp] Failed to connect to server: %s\n", pa_strerror(pa_context_errno(context))); goto fail; } return 1; fail: uninit(1); return 0; }
/* This is called whenever the context status changes */ static void context_state_callback(pa_context *c, void *userdata) { fail_unless(c != NULL); switch (pa_context_get_state(c)) { case PA_CONTEXT_CONNECTING: case PA_CONTEXT_AUTHORIZING: case PA_CONTEXT_SETTING_NAME: break; case PA_CONTEXT_READY: { pa_stream_flags_t flags = PA_STREAM_AUTO_TIMING_UPDATE; pa_buffer_attr attr; static const pa_sample_spec ss = { .format = PA_SAMPLE_S16LE, .rate = 44100, .channels = 2 }; pa_zero(attr); attr.maxlength = (uint32_t) -1; attr.tlength = latency > 0 ? (uint32_t) pa_usec_to_bytes(latency, &ss) : (uint32_t) -1; attr.prebuf = (uint32_t) -1; attr.minreq = (uint32_t) -1; attr.fragsize = (uint32_t) -1; #ifdef INTERPOLATE flags |= PA_STREAM_INTERPOLATE_TIMING; #endif if (latency > 0) flags |= PA_STREAM_ADJUST_LATENCY; pa_log("Connection established"); stream = pa_stream_new(c, "interpol-test", &ss, NULL); fail_unless(stream != NULL); if (playback) { pa_assert_se(pa_stream_connect_playback(stream, NULL, &attr, flags, NULL, NULL) == 0); pa_stream_set_write_callback(stream, stream_write_cb, NULL); } else { pa_assert_se(pa_stream_connect_record(stream, NULL, &attr, flags) == 0); pa_stream_set_read_callback(stream, stream_read_cb, NULL); } pa_stream_set_latency_update_callback(stream, stream_latency_cb, NULL); break; } case PA_CONTEXT_TERMINATED: break; case PA_CONTEXT_FAILED: default: pa_log_error("Context error: %s", pa_strerror(pa_context_errno(c))); fail(); } } START_TEST (interpol_test) { pa_threaded_mainloop* m = NULL; int k; struct timeval start, last_info = { 0, 0 }; pa_usec_t old_t = 0, old_rtc = 0; #ifdef CORK bool corked = false; #endif /* Set up a new main loop */ m = pa_threaded_mainloop_new(); fail_unless(m != NULL); mainloop_api = pa_threaded_mainloop_get_api(m); fail_unless(mainloop_api != NULL); context = pa_context_new(mainloop_api, bname); fail_unless(context != NULL); pa_context_set_state_callback(context, context_state_callback, NULL); fail_unless(pa_context_connect(context, NULL, 0, NULL) >= 0); pa_gettimeofday(&start); fail_unless(pa_threaded_mainloop_start(m) >= 0); /* #ifdef CORK */ for (k = 0; k < 20000; k++) /* #else */ /* for (k = 0; k < 2000; k++) */ /* #endif */ { bool success = false, changed = false; pa_usec_t t, rtc, d; struct timeval now, tv; bool playing = false; pa_threaded_mainloop_lock(m); if (stream) { const pa_timing_info *info; if (pa_stream_get_time(stream, &t) >= 0 && pa_stream_get_latency(stream, &d, NULL) >= 0) success = true; if ((info = pa_stream_get_timing_info(stream))) { if (memcmp(&last_info, &info->timestamp, sizeof(struct timeval))) { changed = true; last_info = info->timestamp; } if (info->playing) playing = true; } } pa_threaded_mainloop_unlock(m); pa_gettimeofday(&now); if (success) { #ifdef CORK bool cork_now; #endif rtc = pa_timeval_diff(&now, &start); pa_log_info("%i\t%llu\t%llu\t%llu\t%llu\t%lli\t%u\t%u\t%llu\t%llu\n", k, (unsigned long long) rtc, (unsigned long long) t, (unsigned long long) (rtc-old_rtc), (unsigned long long) (t-old_t), (signed long long) rtc - (signed long long) t, changed, playing, (unsigned long long) latency, (unsigned long long) d); fflush(stdout); old_t = t; old_rtc = rtc; #ifdef CORK cork_now = (rtc / (2*PA_USEC_PER_SEC)) % 2 == 1; if (corked != cork_now) { pa_threaded_mainloop_lock(m); pa_operation_unref(pa_stream_cork(stream, cork_now, NULL, NULL)); pa_threaded_mainloop_unlock(m); pa_log(cork_now ? 
"Corking" : "Uncorking"); corked = cork_now; } #endif } /* Spin loop, ugly but normal usleep() is just too badly grained */ tv = now; while (pa_timeval_diff(pa_gettimeofday(&now), &tv) < 1000) pa_thread_yield(); } if (m) pa_threaded_mainloop_stop(m); if (stream) { pa_stream_disconnect(stream); pa_stream_unref(stream); } if (context) { pa_context_disconnect(context); pa_context_unref(context); } if (m) pa_threaded_mainloop_free(m); } END_TEST int main(int argc, char *argv[]) { int failed = 0; Suite *s; TCase *tc; SRunner *sr; if (!getenv("MAKE_CHECK")) pa_log_set_level(PA_LOG_DEBUG); bname = argv[0]; playback = argc <= 1 || !pa_streq(argv[1], "-r"); latency = (argc >= 2 && !pa_streq(argv[1], "-r")) ? atoi(argv[1]) : (argc >= 3 ? atoi(argv[2]) : 0); s = suite_create("Interpol"); tc = tcase_create("interpol"); tcase_add_test(tc, interpol_test); tcase_set_timeout(tc, 5 * 60); suite_add_tcase(s, tc); sr = srunner_create(s); srunner_run_all(sr, CK_NORMAL); failed = srunner_ntests_failed(sr); srunner_free(sr); return (failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE; }
bool Init()
{
    int ret = 0;

    mPMainLoop = pa_threaded_mainloop_new();
    pa_mainloop_api *mlapi = pa_threaded_mainloop_get_api(mPMainLoop);

    mPContext = pa_context_new(mlapi, "USBqemu-pulse");

    ret = pa_context_connect(mPContext, mServer, PA_CONTEXT_NOFLAGS, NULL);
    OSDebugOut("pa_context_connect %s\n", pa_strerror(ret));
    if (ret != PA_OK)
        goto error;

    pa_context_set_state_callback(mPContext, pa_context_state_cb, &mPAready);

    pa_threaded_mainloop_start(mPMainLoop);

    // wait for pa_context_state_cb
    for (;;)
    {
        if (mPAready == 1)
            break;
        if (mPAready == 2 || mQuit)
            goto error;
    }

    mStream = pa_stream_new(mPContext, "USBqemu-pulse", &mSSpec, NULL);

    pa_stream_set_read_callback(mStream, stream_read_cb, this);

    // Sets individual read callback fragsize but recording itself
    // still "lags" ~1sec (read_cb is called in bursts) without
    // PA_STREAM_ADJUST_LATENCY
    pa_buffer_attr buffer_attr;
    buffer_attr.maxlength = (uint32_t) -1;
    buffer_attr.tlength   = (uint32_t) -1;
    buffer_attr.prebuf    = (uint32_t) -1;
    buffer_attr.minreq    = (uint32_t) -1;
    buffer_attr.fragsize  = pa_usec_to_bytes(mBuffering * 1000, &mSSpec);
    OSDebugOut("usec_to_bytes %zu\n", buffer_attr.fragsize);

    ret = pa_stream_connect_record(mStream, mDevice.c_str(), &buffer_attr, PA_STREAM_ADJUST_LATENCY);
    OSDebugOut("pa_stream_connect_record %s\n", pa_strerror(ret));
    if (ret != PA_OK)
        goto error;

    // Setup resampler
    if (mResampler)
        mResampler = src_delete(mResampler);

    mResampler = src_new(SRC_SINC_FASTEST, mSSpec.channels, &ret);
    if (!mResampler)
    {
        OSDebugOut("Failed to create resampler: error %08X\n", ret);
        goto error;
    }

    mLastGetBuffer = hrc::now();
    return true;
error:
    Uninit();
    return false;
}
void PulseAudioSource::stream_read_cb(pa_stream *p, size_t nbytes, void *userdata)
{
    PulseAudioSource *src = (PulseAudioSource *) userdata;
    const void* padata = NULL;
    if (src->mQuit)
        return;

    OSDebugOut("stream_read_callback %d bytes\n", nbytes);

    int ret = pa_stream_peek(p, &padata, &nbytes);
    OSDebugOut("pa_stream_peek %zu %s\n", nbytes, pa_strerror(ret));

    if (ret != PA_OK)
        return;

    auto dur = std::chrono::duration_cast<ms>(hrc::now() - src->mLastGetBuffer).count();
    if (src->mPaused || dur > 50000)
    {
        ret = pa_stream_drop(p);
        if (ret != PA_OK)
            OSDebugOut("pa_stream_drop %s\n", pa_strerror(ret));
        return;
    }

    {
        size_t old_size = src->mQBuffer.size();
        size_t nfloats = nbytes / sizeof(float);
        src->mQBuffer.resize(old_size + nfloats);
        memcpy(&src->mQBuffer[old_size], padata, nbytes);

        //if copy succeeded, drop samples at pulse's side
        ret = pa_stream_drop(p);
        if (ret != PA_OK)
            OSDebugOut("pa_stream_drop %s\n", pa_strerror(ret));
    }

    size_t resampled = static_cast<size_t>(src->mQBuffer.size() * src->mResampleRatio * src->mTimeAdjust);// * src->mSSpec.channels;
    if (resampled == 0)
        resampled = src->mQBuffer.size();
    std::vector<float> rebuf(resampled);

    SRC_DATA data;
    memset(&data, 0, sizeof(SRC_DATA));
    data.data_in = &src->mQBuffer[0];
    data.input_frames = src->mQBuffer.size() / src->mSSpec.channels;
    data.data_out = &rebuf[0];
    data.output_frames = resampled / src->mSSpec.channels;
    data.src_ratio = src->mResampleRatio * src->mTimeAdjust;

    src_process(src->mResampler, &data);

    std::lock_guard<std::mutex> lock(src->mMutex);

    uint32_t len = data.output_frames_gen * src->mSSpec.channels;
    size_t size = src->mResampledBuffer.size();
    if (len > 0)
    {
        //too long, drop samples, caused by saving/loading savestates and random stutters
        int sizeInMS = (((src->mResampledBuffer.size() + len) * 1000 / src->mSSpec.channels) / src->mOutputSamplesPerSec);
        int threshold = src->mBuffering > 25 ? src->mBuffering : 25;
        if (sizeInMS > threshold)
        {
            size = 0;
            src->mResampledBuffer.resize(len);
        }
        else
            src->mResampledBuffer.resize(size + len);
        src_float_to_short_array(&rebuf[0], &(src->mResampledBuffer[size]), len);
    }

//#if _DEBUG
//    if (file && len)
//        fwrite(&(src->mResampledBuffer[size]), sizeof(short), len, file);
//#endif

    auto remSize = data.input_frames_used * src->mSSpec.channels;
    src->mQBuffer.erase(src->mQBuffer.begin(), src->mQBuffer.begin() + remSize);

    OSDebugOut("Resampler: in %ld out %ld used %ld gen %ld, rb: %zd, qb: %zd\n",
               data.input_frames, data.output_frames,
               data.input_frames_used, data.output_frames_gen,
               src->mResampledBuffer.size(), src->mQBuffer.size());
}
std::string AudioSinksManager::get_pa_error() const {
    return pa_strerror(pa_context_errno(context));
}
/*!
 * \brief Write outgoing samples directly to the pulseaudio server.
 * \param dev Input. Device to which to play the samples.
 * \param nSamples Input. Number of samples to play.
 * \param cSamples Input. Sample buffer to play from.
 * \param report_latency Input. 1 to update \c quisk_sound_state.latencyPlay, 0 otherwise.
 * \param volume Input. Ratio in [0,1] by which to scale the played samples.
 */
void quisk_play_pulseaudio(struct sound_dev *dev, int nSamples, complex double *cSamples, int report_latency, double volume)
{
    pa_stream *s;
    int i = 0, n = 0;
    void *fbuffer;
    int fbuffer_bytes = 0;

    if (!dev || nSamples <= 0)
        return;
    if (dev->cork_status)
        return;

    s = dev->handle;

    if (report_latency) {       // Report the latency, if requested.
        pa_operation *o;

        pa_threaded_mainloop_lock(pa_ml);
        if (!(o = pa_stream_update_timing_info(s, stream_timing_callback, dev))) {
            printf("pa_stream_update_timing(): %s\n",
                   pa_strerror(pa_context_errno(pa_stream_get_context(s))));
        }
        else {
            while (pa_operation_get_state(o) == PA_OPERATION_RUNNING)
                pa_threaded_mainloop_wait(pa_ml);
            pa_operation_unref(o);
        }
        pa_threaded_mainloop_unlock(pa_ml);
    }

    fbuffer = pa_xmalloc(nSamples * dev->num_channels * dev->sample_bytes);

    // Convert from complex data to framebuffer
    if (dev->sample_bytes == 4) {
        float fi = 0.f, fq = 0.f;
        for (i = 0, n = 0; n < nSamples; i += (dev->num_channels * 4), ++n) {
            fi = (volume * creal(cSamples[n])) / CLIP32;
            fq = (volume * cimag(cSamples[n])) / CLIP32;
            memcpy(fbuffer + i + (dev->channel_I * 4), &fi, 4);
            memcpy(fbuffer + i + (dev->channel_Q * 4), &fq, 4);
        }
    }
    else if (dev->sample_bytes == 2) {
        int ii, qq;
        for (i = 0, n = 0; n < nSamples; i += (dev->num_channels * 2), ++n) {
            ii = (int)(volume * creal(cSamples[n]) / 65536);
            qq = (int)(volume * cimag(cSamples[n]) / 65536);
            memcpy(fbuffer + i + (dev->channel_I * 2), &ii, 2);
            memcpy(fbuffer + i + (dev->channel_Q * 2), &qq, 2);
        }
    }
    else {
        printf("Unknown sample size for %s", dev->name);
        exit(1);
    }
    fbuffer_bytes = nSamples * dev->num_channels * dev->sample_bytes;

    pa_threaded_mainloop_lock(pa_ml);
    size_t writable = pa_stream_writable_size(s);
    if (writable > 0) {
        if (writable > 1024*1000)   //sanity check to prevent pa_xmalloc from crashing on monitor streams
            writable = 1024*1000;
        if (fbuffer_bytes > writable) {
            if (quisk_sound_state.verbose_pulse)
                printf("Truncating write by %u bytes\n", fbuffer_bytes - (int)writable);
            fbuffer_bytes = writable;
        }
        pa_stream_write(dev->handle, fbuffer, (size_t)fbuffer_bytes, NULL, 0, PA_SEEK_RELATIVE);
        //printf("wrote %d to %s\n", writable, dev->name);
    }
    else {
        if (quisk_sound_state.verbose_pulse)
            printf("Can't write to stream %s. Dropping %d bytes\n", dev->name, fbuffer_bytes);
    }
    pa_threaded_mainloop_unlock(pa_ml);

    pa_xfree(fbuffer);
    fbuffer = NULL;
}
static void server_info_cb(pa_context *c, const pa_server_info *info, void *userdata) { struct sound_dev **pDevices = userdata; pa_buffer_attr rec_ba; pa_buffer_attr play_ba; pa_sample_spec ss; pa_sample_spec default_ss; pa_stream_flags_t pb_flags = PA_STREAM_NOFLAGS; pa_stream_flags_t rec_flags = PA_STREAM_ADJUST_LATENCY; default_ss = info->sample_spec; printf("Connected to %s \n", info->host_name); while(*pDevices) { struct sound_dev *dev = *pDevices++; const char *dev_name; pa_stream *s; pa_zero(rec_ba); pa_zero(play_ba); if (dev->name[5] == ':') dev_name = dev->name + 6; // the device name is given; "pulse:alsa_input.pci-0000_00_1b.0.analog-stereo" else dev_name = NULL; // the device name is "pulse" for the default device if (quisk_sound_state.verbose_pulse) printf("Opening Device %s ", dev_name); //Construct sample specification. Use S16LE if availiable. Default to Float32 for others. //If the source/sink is not Float32, Pulseaudio will convert it (uses CPU) //dev->sample_bytes = (int)pa_frame_size(&ss) / ss.channels; if (default_ss.format == PA_SAMPLE_S16LE) { dev->sample_bytes = 2; ss.format = default_ss.format; } else { dev->sample_bytes = 4; ss.format = PA_SAMPLE_FLOAT32LE; } ss.rate = dev->sample_rate; ss.channels = dev->num_channels; rec_ba.maxlength = (uint32_t) -1; rec_ba.fragsize = (uint32_t) SAMP_BUFFER_SIZE / 16; //higher numbers eat cpu on reading monitor streams. play_ba.maxlength = (uint32_t) -1; play_ba.prebuf = (uint32_t) (dev->sample_bytes * ss.channels * dev->latency_frames); //play_ba.tlength = (uint32_t) -1; play_ba.tlength = play_ba.prebuf; if (dev->latency_frames == 0) play_ba.minreq = (uint32_t) 0; //verify this is sane else play_ba.minreq = (uint32_t) -1; if (dev->stream_dir_record) { if (!(s = pa_stream_new(c, dev->stream_description, &ss, NULL))) { printf("pa_stream_new() failed: %s", pa_strerror(pa_context_errno(c))); exit(1); } if (pa_stream_connect_record(s, dev_name, &rec_ba, rec_flags) < 0) { printf("pa_stream_connect_record() failed: %s", pa_strerror(pa_context_errno(c))); exit(1); } pa_stream_set_overflow_callback(s, stream_overflow_callback, dev); } else { pa_cvolume cv; pa_volume_t volume = PA_VOLUME_NORM; if (!(s = pa_stream_new(c, dev->stream_description, &ss, NULL))) { printf("pa_stream_new() failed: %s", pa_strerror(pa_context_errno(c))); exit(1); } if (pa_stream_connect_playback(s, dev_name, &play_ba, pb_flags, pa_cvolume_set(&cv, ss.channels, volume), NULL) < 0) { printf("pa_stream_connect_playback() failed: %s", pa_strerror(pa_context_errno(c))); exit(1); } pa_stream_set_underflow_callback(s, stream_underflow_callback, dev); } pa_stream_set_state_callback(s, stream_state_callback, dev); pa_stream_set_started_callback(s, stream_started_callback, dev); dev->handle = (void*)s; //save memory address for stream in handle int i; for(i=0;i < PA_LIST_SIZE;i++) { //save address for stream for easy exit if (!(OpenPulseDevices[i])) { OpenPulseDevices[i] = dev->handle; break; } } } }
void vlc_pa_error (vlc_object_t *obj, const char *msg, pa_context *ctx)
{
    msg_Err (obj, "%s: %s", msg, pa_strerror (pa_context_errno (ctx)));
}
/**
 * @brief Callback function called when PA completed an operation
 * @param c Context on which the operation completed
 * @param success Non-zero if the operation succeeded, zero otherwise
 * @param this_gen pulse_driver_t pointer for the PulseAudio output
 *        instance.
 */
static void __xine_pa_context_success_callback(pa_context *c, int success,
                                               void *this_gen)
{
  pulse_driver_t *this = (pulse_driver_t*) this_gen;

  if (!success)
    xprintf (this->xine, XINE_VERBOSITY_DEBUG,
             "audio_pulse_out: context operation failed: %s\n",
             pa_strerror(pa_context_errno(this->context)));

  pa_threaded_mainloop_signal(this->mainloop, 0);
}
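A minimal companion sketch (not part of the xine driver; the helper name is illustrative) of how a success callback like the one above is typically consumed: the caller starts the operation while holding the threaded mainloop lock, then waits until the callback calls pa_threaded_mainloop_signal().

#include <pulse/pulseaudio.h>

/* Hypothetical helper: wait for a context operation to finish.
 * Assumes the caller already holds the threaded mainloop lock. */
static void wait_for_context_operation(pa_threaded_mainloop *ml, pa_operation *op)
{
    if (op == NULL)
        return;
    while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
        pa_threaded_mainloop_wait(ml);   /* woken by pa_threaded_mainloop_signal() */
    pa_operation_unref(op);
}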
INT16 m1sdr_Init(int sample_rate) { int format, stereo, rate, fsize, err, state; unsigned int nfreq, periodtime; snd_pcm_hw_params_t *hwparams; #ifdef USE_SDL SDL_AudioSpec aspec; #endif pa_channel_map chanmap; pa_buffer_attr my_pa_attr; hw_present = 0; m1sdr_Callback = NULL; nDSoundSegLen = sample_rate / 60; switch (lnxdrv_apimode) { case 0: // SDL #ifdef USE_SDL SDL_InitSubSystem(SDL_INIT_AUDIO); m1sdr_SetSamplesPerTick(sample_rate/60); playbuf = 0; writebuf = 1; aspec.freq = sample_rate; aspec.format = AUDIO_S16SYS; // keep endian independant aspec.channels = 2; aspec.samples = 512; // has to be a power of 2, and we want it smaller than our buffer size aspec.callback = sdl_callback; aspec.userdata = 0; if (SDL_OpenAudio(&aspec, NULL) < 0) { printf("ERROR: can't open SDL audio\n"); return 0; } // make sure we don't start yet SDL_PauseAudio(1); #endif break; case 1: // ALSA // Try to open audio device if ((err = snd_pcm_open(&pHandle, "default", SND_PCM_STREAM_PLAYBACK, 0)) < 0) { fprintf(stderr, "ALSA: Could not open soundcard (%s)\n", snd_strerror(err)); hw_present = 0; return 0; } if ((err = snd_pcm_hw_params_malloc(&hwparams)) < 0) { fprintf (stderr, "cannot allocate hardware parameter structure (%s)\n", snd_strerror(err)); return 0; } // Init hwparams with full configuration space if ((err = snd_pcm_hw_params_any(pHandle, hwparams)) < 0) { fprintf(stderr, "ALSA: couldn't set hw params (%s)\n", snd_strerror(err)); hw_present = 0; return 0; } // Set access type if ((err = snd_pcm_hw_params_set_access(pHandle, hwparams, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) { fprintf(stderr, "ALSA: can't set access (%s)\n", snd_strerror(err)); return 0; } // Set sample format if ((err = snd_pcm_hw_params_set_format(pHandle, hwparams, SND_PCM_FORMAT_S16)) < 0) { fprintf(stderr, "ALSA: can't set format (%s)\n", snd_strerror(err)); return 0; } // Set sample rate (nearest possible) nfreq = sample_rate; if ((err = snd_pcm_hw_params_set_rate_near(pHandle, hwparams, &nfreq, 0)) < 0) { fprintf(stderr, "ALSA: can't set sample rate (%s)\n", snd_strerror(err)); return 0; } // Set number of channels if ((err = snd_pcm_hw_params_set_channels(pHandle, hwparams, 2)) < 0) { fprintf(stderr, "ALSA: can't set stereo (%s)\n", snd_strerror(err)); return 0; } // Set period time (nearest possible) periodtime = 16; if ((err = snd_pcm_hw_params_set_period_time_near(pHandle, hwparams, &periodtime, 0)) < 0) { fprintf(stderr, "ALSA: can't set period time (%s)\n", snd_strerror(err)); return 0; } // Apply HW parameter settings to PCM device and prepare device if ((err = snd_pcm_hw_params(pHandle, hwparams)) < 0) { fprintf(stderr, "ALSA: unable to install hw_params (%s)\n", snd_strerror(err)); snd_pcm_hw_params_free(hwparams); return 0; } snd_pcm_hw_params_free(hwparams); if ((err = snd_pcm_prepare(pHandle)) < 0) { fprintf (stderr, "cannot prepare audio interface for use (%s)\n", snd_strerror(err)); return 0; } break; case 2: // OSS audiofd = open("/dev/dsp", O_WRONLY, 0); if (audiofd == -1) { audiofd = open("/dev/dsp1", O_WRONLY, 0); if (audiofd == -1) { perror("/dev/dsp1"); return(0); } } // reset things ioctl(audiofd, SNDCTL_DSP_RESET, 0); is_broken_driver = 0; num_frags = NUM_FRAGS_NORMAL; // set the buffer size we want fsize = OSS_FRAGMENT; if (ioctl(audiofd, SNDCTL_DSP_SETFRAGMENT, &fsize) == - 1) { perror("SNDCTL_DSP_SETFRAGMENT"); return(0); } // set 16-bit output format = AFMT_S16_NE; // 16 bit signed "native"-endian if (ioctl(audiofd, SNDCTL_DSP_SETFMT, &format) == - 1) { perror("SNDCTL_DSP_SETFMT"); return(0); } // now set 
stereo stereo = 1; if (ioctl(audiofd, SNDCTL_DSP_STEREO, &stereo) == - 1) { perror("SNDCTL_DSP_STEREO"); return(0); } // and the sample rate rate = sample_rate; if (ioctl(audiofd, SNDCTL_DSP_SPEED, &rate) == - 1) { perror("SNDCTL_DSP_SPEED"); return(0); } // and make sure that did what we wanted ioctl(audiofd, SNDCTL_DSP_GETBLKSIZE, &fsize); break; case 3: // PulseAudio sample_spec.format = PA_SAMPLE_S16NE; sample_spec.rate = sample_rate; sample_spec.channels = 2; my_pa_context = NULL; my_pa_stream = NULL; my_pa_mainloop = NULL; my_pa_mainloop_api = NULL; #if !PULSE_USE_SIMPLE // get default channel mapping pa_channel_map_init_auto(&chanmap, sample_spec.channels, PA_CHANNEL_MAP_WAVEEX); if (!(my_pa_mainloop = pa_mainloop_new())) { fprintf(stderr, "pa_mainloop_new() failed\n"); return 0; } my_pa_mainloop_api = pa_mainloop_get_api(my_pa_mainloop); /* if (pa_signal_init(my_pa_mainloop_api) != 0) { fprintf(stderr, "pa_signal_init() failed\n"); return 0; }*/ /* Create a new connection context */ if (!(my_pa_context = pa_context_new(my_pa_mainloop_api, "Audio Overload"))) { fprintf(stderr, "pa_context_new() failed\n"); return 0; } /* set the context state CB */ // pa_context_set_state_callback(my_pa_context, context_state_callback, NULL); /* Connect the context */ if (pa_context_connect(my_pa_context, NULL, (pa_context_flags_t)0, NULL) < 0) { fprintf(stderr, "pa_context_connect() failed: %s", pa_strerror(pa_context_errno(my_pa_context))); return 0; } do { pa_mainloop_iterate(my_pa_mainloop, 1, NULL); state = pa_context_get_state(my_pa_context); if (!PA_CONTEXT_IS_GOOD((pa_context_state_t)state)) { printf("PA CONTEXT NOT GOOD\n"); hw_present = 0; return 0; } } while (state != PA_CONTEXT_READY); if (!(my_pa_stream = pa_stream_new(my_pa_context, "Audio Overload", &sample_spec, &chanmap))) { fprintf(stderr, "pa_stream_new() failed: %s\n", pa_strerror(pa_context_errno(my_pa_context))); return 0; } memset(&my_pa_attr, 0, sizeof(my_pa_attr)); my_pa_attr.tlength = nDSoundSegLen * 4 * 4; my_pa_attr.prebuf = -1; my_pa_attr.maxlength = -1; my_pa_attr.minreq = nDSoundSegLen * 4 * 2; if ((err = pa_stream_connect_playback(my_pa_stream, NULL, &my_pa_attr, PA_STREAM_ADJUST_LATENCY, NULL, NULL)) < 0) { fprintf(stderr, "pa_stream_connect_playback() failed: %s\n", pa_strerror(pa_context_errno(my_pa_context))); return 0; } do { pa_mainloop_iterate(my_pa_mainloop, 1, NULL); state = pa_stream_get_state(my_pa_stream); if (!PA_STREAM_IS_GOOD((pa_stream_state_t)state)) { printf("PA STREAM NOT GOOD\n"); hw_present = 0; return 0; } } while (state != PA_STREAM_READY); // printf("PulseAudio setup OK so far, len %d\n", nDSoundSegLen*4); #else my_simple = NULL; #endif break; } hw_present = 1; return (1); }
/**
 * @brief Callback function called when the daemon reports a subscription event
 * @param c Context on which the event was reported
 * @param t Subscription event type
 * @param idx Index of the object the event refers to
 * @param this_gen pulse_driver_t pointer for the PulseAudio output
 *        instance.
 */
static void __xine_pa_context_subscribe_callback(pa_context *c,
    pa_subscription_event_type_t t, uint32_t idx, void *this_gen)
{
  pulse_driver_t * this = (pulse_driver_t*) this_gen;
  int index;

  if (this->stream == NULL)
    return;

  index = pa_stream_get_index(this->stream);
  if (index != idx)
    return;

  if ((t & PA_SUBSCRIPTION_EVENT_TYPE_MASK) != PA_SUBSCRIPTION_EVENT_CHANGE)
    return;

  pa_operation *operation = pa_context_get_sink_input_info(
      this->context, index, __xine_pa_sink_info_callback, this);

  if (operation == NULL) {
    xprintf(this->xine, XINE_VERBOSITY_DEBUG,
            "audio_pulse_out: failed to get sink info: %s\n",
            pa_strerror(pa_context_errno (this->context)));
    return;
  }

  pa_operation_unref(operation);
}
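A minimal registration sketch (assumed, not taken from the xine source; the helper name is illustrative): the subscribe callback above only fires after the driver registers it on the context and subscribes to sink-input events, typically under the threaded mainloop lock.

#include <pulse/pulseaudio.h>

/* Hypothetical helper: wire up the subscribe callback defined above and ask
 * the server to deliver sink-input change events. */
static void register_sink_input_events(pa_threaded_mainloop *ml, pa_context *c, void *this_gen)
{
    pa_threaded_mainloop_lock(ml);
    pa_context_set_subscribe_callback(c, __xine_pa_context_subscribe_callback, this_gen);

    pa_operation *op = pa_context_subscribe(c, PA_SUBSCRIPTION_MASK_SINK_INPUT, NULL, NULL);
    if (op)
        pa_operation_unref(op);
    pa_threaded_mainloop_unlock(ml);
}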
bool AudioOutputPulseAudio::ContextConnect(void)
{
    QString fn_log_tag = "ContextConnect, ";
    if (pcontext)
    {
        VBERROR(fn_log_tag + "context appears to exist, but shouldn't (yet)");
        pa_context_unref(pcontext);
        pcontext = NULL;
        return false;
    }
    pcontext = pa_context_new(pa_threaded_mainloop_get_api(mainloop), "MythTV");
    if (!pcontext)
    {
        VBERROR(fn_log_tag + "failed to acquire new context");
        return false;
    }
    pa_context_set_state_callback(pcontext, ContextStateCallback, this);

    char *pulse_host = ChooseHost();
    int chk = pa_context_connect(
        pcontext, pulse_host, (pa_context_flags_t)0, NULL);

    delete(pulse_host);

    if (chk < 0)
    {
        VBERROR(fn_log_tag + QString("context connect failed: %1")
                     .arg(pa_strerror(pa_context_errno(pcontext))));
        return false;
    }

    bool connected = false;
    pa_context_state_t state = pa_context_get_state(pcontext);
    for (; !connected; state = pa_context_get_state(pcontext))
    {
        switch (state)
        {
            case PA_CONTEXT_READY:
                VBAUDIO(fn_log_tag + "context connection ready");
                connected = true;
                continue;

            case PA_CONTEXT_FAILED:
            case PA_CONTEXT_TERMINATED:
                VBERROR(fn_log_tag +
                        QString("context connection failed or terminated: %1")
                        .arg(pa_strerror(pa_context_errno(pcontext))));
                return false;

            default:
                VBAUDIO(fn_log_tag + "waiting for context connection ready");
                pa_threaded_mainloop_wait(mainloop);
                break;
        }
    }

    pa_operation *op =
        pa_context_get_server_info(pcontext, ServerInfoCallback, this);

    if (op)
        pa_operation_unref(op);
    else
        VBERROR(fn_log_tag + "failed to get PulseAudio server info");

    return true;
}
static DECLCALLBACK(int) drvHostPulseAudioInit(PPDMIHOSTAUDIO pInterface) { NOREF(pInterface); LogFlowFuncEnter(); int rc = audioLoadPulseLib(); if (RT_FAILURE(rc)) { LogRel(("PulseAudio: Failed to load the PulseAudio shared library! Error %Rrc\n", rc)); return rc; } bool fLocked = false; do { if (!(g_pMainLoop = pa_threaded_mainloop_new())) { LogRel(("PulseAudio: Failed to allocate main loop: %s\n", pa_strerror(pa_context_errno(g_pContext)))); rc = VERR_NO_MEMORY; break; } if (!(g_pContext = pa_context_new(pa_threaded_mainloop_get_api(g_pMainLoop), "VirtualBox"))) { LogRel(("PulseAudio: Failed to allocate context: %s\n", pa_strerror(pa_context_errno(g_pContext)))); rc = VERR_NO_MEMORY; break; } if (pa_threaded_mainloop_start(g_pMainLoop) < 0) { LogRel(("PulseAudio: Failed to start threaded mainloop: %s\n", pa_strerror(pa_context_errno(g_pContext)))); rc = VERR_GENERAL_FAILURE; /** @todo Find a better rc. */ break; } pa_context_set_state_callback(g_pContext, drvHostPulseAudioCbCtxState, NULL); pa_threaded_mainloop_lock(g_pMainLoop); fLocked = true; if (pa_context_connect(g_pContext, NULL /* pszServer */, PA_CONTEXT_NOFLAGS, NULL) < 0) { LogRel(("PulseAudio: Failed to connect to server: %s\n", pa_strerror(pa_context_errno(g_pContext)))); rc = VERR_GENERAL_FAILURE; /** @todo Find a better rc. */ break; } /* Wait until the g_pContext is ready */ for (;;) { pa_context_state_t cstate; pa_threaded_mainloop_wait(g_pMainLoop); cstate = pa_context_get_state(g_pContext); if (cstate == PA_CONTEXT_READY) break; else if ( cstate == PA_CONTEXT_TERMINATED || cstate == PA_CONTEXT_FAILED) { LogRel(("PulseAudio: Failed to initialize context (state %d)\n", cstate)); rc = VERR_GENERAL_FAILURE; /** @todo Find a better rc. */ break; } } pa_threaded_mainloop_unlock(g_pMainLoop); } while (0); if (RT_FAILURE(rc)) { if (g_pMainLoop) { if (fLocked) pa_threaded_mainloop_unlock(g_pMainLoop); if (g_pMainLoop) pa_threaded_mainloop_stop(g_pMainLoop); } if (g_pContext) { pa_context_disconnect(g_pContext); pa_context_unref(g_pContext); g_pContext = NULL; } if (g_pMainLoop) { pa_threaded_mainloop_free(g_pMainLoop); g_pMainLoop = NULL; } } LogFlowFuncLeaveRC(rc); return rc; }
int main(int argc, char *argv[]) { QString cfg_file; std::string conf; std::string style; bool clierr = false; bool edit_conf = false; int return_code; QApplication app(argc, argv); QCoreApplication::setOrganizationName(GQRX_ORG_NAME); QCoreApplication::setOrganizationDomain(GQRX_ORG_DOMAIN); QCoreApplication::setApplicationName(GQRX_APP_NAME); QCoreApplication::setApplicationVersion(VERSION); // setup controlport via environment variables // see http://lists.gnu.org/archive/html/discuss-gnuradio/2013-05/msg00270.html // Note: tried using gr::prefs().save() but that doesn't have effect until the next time if (qputenv("GR_CONF_CONTROLPORT_ON", "False")) qDebug() << "Controlport disabled"; else qDebug() << "Failed to disable controlport"; // setup the program options po::options_description desc("Command line options"); desc.add_options() ("help,h", "This help message") ("style,s", po::value<std::string>(&style), "Use the give style (fusion, windows)") ("list,l", "List existing configurations") ("conf,c", po::value<std::string>(&conf), "Start with this config file") ("edit,e", "Edit the config file before using it") ("reset,r", "Reset configuration file") ; po::variables_map vm; try { po::store(po::parse_command_line(argc, argv, desc), vm); } catch(const boost::program_options::invalid_command_line_syntax& ex) { /* happens if e.g. -c without file name */ clierr = true; } catch(const boost::program_options::unknown_option& ex) { /* happens if e.g. -c without file name */ clierr = true; } po::notify(vm); // print the help message if (vm.count("help") || clierr) { std::cout << "Gqrx software defined radio receiver " << VERSION << std::endl; std::cout << desc << std::endl; return 1; } if (vm.count("style")) QApplication::setStyle(QString::fromStdString(style)); if (vm.count("list")) { list_conf(); return 0; } // check whether audio backend is functional #ifdef WITH_PORTAUDIO PaError err = Pa_Initialize(); if (err != paNoError) { QString message = QString("Portaudio error: %1").arg(Pa_GetErrorText(err)); qCritical() << message; QMessageBox::critical(0, "Audio Error", message, QMessageBox::Abort, QMessageBox::NoButton); return 1; } #endif #ifdef WITH_PULSEAUDIO int error = 0; pa_simple *test_sink; pa_sample_spec ss; ss.format = PA_SAMPLE_FLOAT32LE; ss.rate = 48000; ss.channels = 2; test_sink = pa_simple_new(NULL, "Gqrx Test", PA_STREAM_PLAYBACK, NULL, "Test stream", &ss, NULL, NULL, &error); if (!test_sink) { QString message = QString("Pulseaudio error: %1").arg(pa_strerror(error)); qCritical() << message; QMessageBox::critical(0, "Audio Error", message, QMessageBox::Abort, QMessageBox::NoButton); return 1; } pa_simple_free(test_sink); #endif if (!conf.empty()) { cfg_file = QString::fromStdString(conf); qDebug() << "User specified config file:" << cfg_file; } else { cfg_file = "default.conf"; qDebug() << "No user supplied config file. Using" << cfg_file; } if (vm.count("reset")) reset_conf(cfg_file); else if (vm.count("edit")) edit_conf = true; // Mainwindow will check whether we have a configuration // and open the config dialog if there is none or the specified // file does not exist. MainWindow w(cfg_file, edit_conf); if (w.configOk) { w.show(); return_code = app.exec(); } else { return_code = 1; } #ifdef WITH_PORTAUDIO Pa_Terminate(); #endif return return_code; }
Error AudioDriverPulseAudio::init_device() { // If there is a specified device check that it is really present if (device_name != "Default") { Array list = get_device_list(); if (list.find(device_name) == -1) { device_name = "Default"; new_device = "Default"; } } // Detect the amount of channels PulseAudio is using // Note: If using an even amount of channels (2, 4, etc) channels and pa_map.channels will be equal, // if not then pa_map.channels will have the real amount of channels PulseAudio is using and channels // will have the amount of channels Godot is using (in this case it's pa_map.channels + 1) detect_channels(); switch (pa_map.channels) { case 1: // Mono case 3: // Surround 2.1 case 5: // Surround 5.0 case 7: // Surround 7.0 channels = pa_map.channels + 1; break; case 2: // Stereo case 4: // Surround 4.0 case 6: // Surround 5.1 case 8: // Surround 7.1 channels = pa_map.channels; break; default: WARN_PRINTS("PulseAudio: Unsupported number of channels: " + itos(pa_map.channels)); pa_channel_map_init_stereo(&pa_map); channels = 2; break; } int latency = GLOBAL_DEF_RST("audio/output_latency", DEFAULT_OUTPUT_LATENCY); buffer_frames = closest_power_of_2(latency * mix_rate / 1000); pa_buffer_size = buffer_frames * pa_map.channels; print_verbose("PulseAudio: detected " + itos(pa_map.channels) + " channels"); print_verbose("PulseAudio: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms"); pa_sample_spec spec; spec.format = PA_SAMPLE_S16LE; spec.channels = pa_map.channels; spec.rate = mix_rate; pa_str = pa_stream_new(pa_ctx, "Sound", &spec, &pa_map); if (pa_str == NULL) { ERR_PRINTS("PulseAudio: pa_stream_new error: " + String(pa_strerror(pa_context_errno(pa_ctx)))); ERR_FAIL_V(ERR_CANT_OPEN); } pa_buffer_attr attr; // set to appropriate buffer length (in bytes) from global settings // Note: PulseAudio defaults to 4 fragments, which means that the actual // latency is tlength / fragments. It seems that the PulseAudio has no way // to get the fragments number so we're hardcoding this to the default of 4 const int fragments = 4; attr.tlength = pa_buffer_size * sizeof(int16_t) * fragments; // set them to be automatically chosen attr.prebuf = (uint32_t)-1; attr.maxlength = (uint32_t)-1; attr.minreq = (uint32_t)-1; const char *dev = device_name == "Default" ? NULL : device_name.utf8().get_data(); pa_stream_flags flags = pa_stream_flags(PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE); int error_code = pa_stream_connect_playback(pa_str, dev, &attr, flags, NULL, NULL); ERR_FAIL_COND_V(error_code < 0, ERR_CANT_OPEN); samples_in.resize(buffer_frames * channels); samples_out.resize(pa_buffer_size); // Reset audio input to keep synchronisation. input_position = 0; input_size = 0; return OK; }
static DECLCALLBACK(int) drvHostPulseAudioInit(PPDMIHOSTAUDIO pInterface)
{
    AssertPtrReturn(pInterface, VERR_INVALID_POINTER);
    PDRVHOSTPULSEAUDIO pThis = PDMIHOSTAUDIO_2_DRVHOSTPULSEAUDIO(pInterface);

    LogFlowFuncEnter();

    int rc = audioLoadPulseLib();
    if (RT_FAILURE(rc)) {
        LogRel(("PulseAudio: Failed to load the PulseAudio shared library! Error %Rrc\n", rc));
        return rc;
    }

    pThis->fLoopWait = false;
    pThis->pMainLoop = NULL;

    bool fLocked = false;

    do {
        if (!(pThis->pMainLoop = pa_threaded_mainloop_new())) {
            LogRel(("PulseAudio: Failed to allocate main loop: %s\n",
                    pa_strerror(pa_context_errno(pThis->pContext))));
            rc = VERR_NO_MEMORY;
            break;
        }

        if (!(pThis->pContext = pa_context_new(pa_threaded_mainloop_get_api(pThis->pMainLoop), "VirtualBox"))) {
            LogRel(("PulseAudio: Failed to allocate context: %s\n",
                    pa_strerror(pa_context_errno(pThis->pContext))));
            rc = VERR_NO_MEMORY;
            break;
        }

        if (pa_threaded_mainloop_start(pThis->pMainLoop) < 0) {
            LogRel(("PulseAudio: Failed to start threaded mainloop: %s\n",
                    pa_strerror(pa_context_errno(pThis->pContext))));
            rc = VERR_AUDIO_BACKEND_INIT_FAILED;
            break;
        }

        /* Install a global callback to know if something happens to our acquired context. */
        pa_context_set_state_callback(pThis->pContext, paContextCbStateChanged, pThis /* pvUserData */);

        pa_threaded_mainloop_lock(pThis->pMainLoop);
        fLocked = true;

        if (pa_context_connect(pThis->pContext, NULL /* pszServer */, PA_CONTEXT_NOFLAGS, NULL) < 0) {
            LogRel(("PulseAudio: Failed to connect to server: %s\n",
                    pa_strerror(pa_context_errno(pThis->pContext))));
            rc = VERR_AUDIO_BACKEND_INIT_FAILED;
            break;
        }

        /* Wait until the pThis->pContext is ready. */
        for (;;) {
            if (!pThis->fLoopWait)
                pa_threaded_mainloop_wait(pThis->pMainLoop);
            pThis->fLoopWait = false;

            pa_context_state_t cstate = pa_context_get_state(pThis->pContext);
            if (cstate == PA_CONTEXT_READY)
                break;
            else if (   cstate == PA_CONTEXT_TERMINATED
                     || cstate == PA_CONTEXT_FAILED) {
                LogRel(("PulseAudio: Failed to initialize context (state %d)\n", cstate));
                rc = VERR_AUDIO_BACKEND_INIT_FAILED;
                break;
            }
        }
    } while (0);

    if (fLocked)
        pa_threaded_mainloop_unlock(pThis->pMainLoop);

    if (RT_FAILURE(rc)) {
        if (pThis->pMainLoop)
            pa_threaded_mainloop_stop(pThis->pMainLoop);

        if (pThis->pContext) {
            pa_context_disconnect(pThis->pContext);
            pa_context_unref(pThis->pContext);
            pThis->pContext = NULL;
        }

        if (pThis->pMainLoop) {
            pa_threaded_mainloop_free(pThis->pMainLoop);
            pThis->pMainLoop = NULL;
        }
    }

    LogFlowFuncLeaveRC(rc);
    return rc;
}
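The VirtualBox init above blocks in pa_threaded_mainloop_wait() until the context reaches PA_CONTEXT_READY; the state callback that wakes that wait is not shown here. A generic sketch of the pairing, assuming the plain signal/wait pattern (names are illustrative; the real driver additionally uses its fLoopWait flag):

/* Sketch: a context state callback plus the matching wait loop.
 * The callback only wakes the waiter; the waiter re-checks the state. */
#include <pulse/context.h>
#include <pulse/thread-mainloop.h>

static void context_state_cb(pa_context *c, void *userdata)
{
    pa_threaded_mainloop *ml = (pa_threaded_mainloop *) userdata;
    (void) c;
    /* Runs on the mainloop thread; wakes pa_threaded_mainloop_wait(). */
    pa_threaded_mainloop_signal(ml, 0);
}

static int wait_for_context_ready(pa_context *ctx, pa_threaded_mainloop *ml)
{
    /* Caller must hold the mainloop lock (pa_threaded_mainloop_lock). */
    for (;;) {
        pa_context_state_t st = pa_context_get_state(ctx);
        if (st == PA_CONTEXT_READY)
            return 0;
        if (!PA_CONTEXT_IS_GOOD(st))   /* TERMINATED or FAILED */
            return -1;
        pa_threaded_mainloop_wait(ml); /* lock is released while sleeping */
    }
}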
int softrock_open(void) {
#ifdef DIRECTAUDIO
    int arg;
    int status;
#endif
#ifdef PORTAUDIO
    int rc;
    int status;
#endif
#ifdef PULSEAUDIO
    int error;
    pa_sample_spec params;
    pa_buffer_attr attrs;
#endif
#ifdef PORTAUDIO
    PaStreamParameters inputParameters;
    PaStreamParameters outputParameters;
    const PaStreamInfo *info;
    int devices;
    int i;
    const PaDeviceInfo *deviceInfo;

    if (softrock_get_verbose()) fprintf(stderr,"softrock_open: portaudio\n");
#endif
#ifdef DIRECTAUDIO
    if (softrock_get_verbose()) fprintf(stderr,"softrock_open: %s\n",softrock_get_device());
#endif

    if(softrock_get_playback()) {
        return 0;
    }

#ifdef PULSEAUDIO
    if (softrock_get_verbose()) fprintf(stderr,"Using PulseAudio\n");

    params.format=PA_SAMPLE_FLOAT32LE;
    params.rate=softrock_get_sample_rate();
    params.channels=2;

    /* let the server pick defaults, then request one buffer worth of data per fragment/target length */
    attrs.maxlength=attrs.minreq=attrs.prebuf=attrs.tlength=attrs.fragsize=(uint32_t)-1;
    attrs.fragsize=SAMPLES_PER_BUFFER*2 * sizeof(float);
    attrs.tlength=SAMPLES_PER_BUFFER*2 * sizeof(float);

    if (softrock_get_verbose()) fprintf(stderr,"params.rate=%d\n",params.rate);

    stream=pa_simple_new("localhost","Softrock", PA_STREAM_RECORD, NULL, "IQ", &params, NULL, &attrs, &error);
    if(stream==NULL) {
        if (softrock_get_verbose()) fprintf(stderr, __FILE__": pa_simple_new() failed: %s\n", pa_strerror(error));
        exit(0);
    }

    playback_stream=pa_simple_new("localhost","Softrock", PA_STREAM_PLAYBACK, NULL, "IQ", &params, NULL, &attrs, &error);
    if(playback_stream==NULL) {
        if (softrock_get_verbose()) fprintf(stderr, __FILE__": pa_simple_new() failed: %s\n", pa_strerror(error));
        exit(0);
    }

    ftime(&start_time);
#endif

#ifdef PORTAUDIO
    if (softrock_get_verbose()) fprintf(stderr,"Using PortAudio\n");

    rc=Pa_Initialize();
    if(rc!=paNoError) {
        if (softrock_get_verbose()) fprintf(stderr,"Pa_Initialize failed: %s\n",Pa_GetErrorText(rc));
        exit(1);
    }

    devices=Pa_GetDeviceCount();
    if(devices<0) {
        if (softrock_get_verbose()) fprintf(stderr,"Pa_GetDeviceCount failed: %s\n",Pa_GetErrorText(devices));
    } else {
        if (softrock_get_verbose()) fprintf(stderr,"default input=%d output=%d devices=%d\n",Pa_GetDefaultInputDevice(),Pa_GetDefaultOutputDevice(),devices);
        for(i=0;i<devices;i++) {
            deviceInfo=Pa_GetDeviceInfo(i);
            if (softrock_get_verbose()) fprintf(stderr,"%d - %s\n",i,deviceInfo->name);
            if (softrock_get_verbose()) fprintf(stderr,"maxInputChannels: %d\n",deviceInfo->maxInputChannels);
            if (softrock_get_verbose()) fprintf(stderr,"maxOutputChannels: %d\n",deviceInfo->maxOutputChannels);
            //if (softrock_get_verbose()) fprintf(stderr,"defaultLowInputLatency: %f\n",deviceInfo->defaultLowInputLatency);
            //if (softrock_get_verbose()) fprintf(stderr,"defaultLowOutputLatency: %f\n",deviceInfo->defaultLowOutputLatency);
            //if (softrock_get_verbose()) fprintf(stderr,"defaultHighInputLatency: %f\n",deviceInfo->defaultHighInputLatency);
            //if (softrock_get_verbose()) fprintf(stderr,"defaultHighOutputLatency: %f\n",deviceInfo->defaultHighOutputLatency);
            //if (softrock_get_verbose()) fprintf(stderr,"defaultSampleRate: %f\n",deviceInfo->defaultSampleRate);
        }
    }

    inputParameters.device=atoi(softrock_get_input());
    inputParameters.channelCount=2;
    inputParameters.sampleFormat=paFloat32;
    inputParameters.suggestedLatency=Pa_GetDeviceInfo(inputParameters.device)->defaultLowInputLatency;
    inputParameters.hostApiSpecificStreamInfo=NULL;

    outputParameters.device=atoi(softrock_get_output());
    outputParameters.channelCount=2;
    outputParameters.sampleFormat=paFloat32;
    outputParameters.suggestedLatency=Pa_GetDeviceInfo(outputParameters.device)->defaultLowOutputLatency;
    outputParameters.hostApiSpecificStreamInfo=NULL;

    if (softrock_get_verbose()) fprintf(stderr,"input device=%d output device=%d\n",inputParameters.device,outputParameters.device);

    rc=Pa_OpenStream(&stream,&inputParameters,&outputParameters,(double)softrock_get_sample_rate(),(unsigned long)SAMPLES_PER_BUFFER,paNoFlag,NULL,NULL);
    if(rc!=paNoError) {
        if (softrock_get_verbose()) fprintf(stderr,"Pa_OpenStream failed: %s\n",Pa_GetErrorText(rc));
        exit(1);
    }

    rc=Pa_StartStream(stream);
    if(rc!=paNoError) {
        if (softrock_get_verbose()) fprintf(stderr,"Pa_StartStream failed: %s\n",Pa_GetErrorText(rc));
        exit(1);
    }

    info=Pa_GetStreamInfo(stream);
    if(info!=NULL) {
        if (softrock_get_verbose()) fprintf(stderr,"stream.sampleRate=%f\n",info->sampleRate);
        if (softrock_get_verbose()) fprintf(stderr,"stream.inputLatency=%f\n",info->inputLatency);
        if (softrock_get_verbose()) fprintf(stderr,"stream.outputLatency=%f\n",info->outputLatency);
    } else {
        if (softrock_get_verbose()) fprintf(stderr,"Pa_GetStreamInfo returned NULL\n");
    }
#endif

#ifdef DIRECTAUDIO
    if (softrock_get_verbose()) fprintf(stderr,"Using direct audio\n");

    /* open sound device */
    fd = open(softrock_get_device(), O_RDWR);
    if (fd < 0) {
        perror("open of audio device failed");
        exit(1);
    }

    /* set sampling parameters */
    arg = SAMPLE_SIZE;      /* sample size */
    status = ioctl(fd, SOUND_PCM_WRITE_BITS, &arg);
    if (status == -1) perror("SOUND_PCM_WRITE_BITS ioctl failed");
    if (arg != SAMPLE_SIZE) perror("unable to set write sample size");

    status = ioctl(fd, SOUND_PCM_READ_BITS, &arg);
    if (status == -1) perror("SOUND_PCM_READ_BITS ioctl failed");
    if (arg != SAMPLE_SIZE) perror("unable to set read sample size");

    arg = CHANNELS;         /* mono or stereo */
    status = ioctl(fd, SOUND_PCM_WRITE_CHANNELS, &arg);
    if (status == -1) perror("SOUND_PCM_WRITE_CHANNELS ioctl failed");
    if (arg != CHANNELS) perror("unable to set number of channels");

    arg = softrock_get_sample_rate();   /* sampling rate */
    if (softrock_get_verbose()) fprintf(stderr,"sample_rate: %d\n",arg);
    status = ioctl(fd, SOUND_PCM_WRITE_RATE, &arg);
    if (status == -1) perror("SOUND_PCM_WRITE_RATE ioctl failed");

    arg = AFMT_S16_LE;      /* signed little endian */
    status = ioctl(fd, SOUND_PCM_SETFMT, &arg);
    if (status == -1) perror("SOUND_PCM_SETFMT ioctl failed");
#endif

    InitBuf(&rx_r, "RX_R");
    InitBuf(&rx_l, "RX_L");
    printf("my ring buffers are set\n");

    return 0;
}
void PulseAudioPlayer::OpenStream() { if (open) CloseStream(); // Initialise a mainloop //printf("Initialising threaded main loop\n"); mainloop = pa_threaded_mainloop_new(); if (!mainloop) { throw agi::AudioPlayerOpenError("Failed to initialise PulseAudio threaded mainloop object", 0); } //printf("Starting main loop\n"); pa_threaded_mainloop_start(mainloop); // Create context //printf("Creating context\n"); context = pa_context_new(pa_threaded_mainloop_get_api(mainloop), "Aegisub"); if (!context) { pa_threaded_mainloop_free(mainloop); throw agi::AudioPlayerOpenError("Failed to create PulseAudio context", 0); } pa_context_set_state_callback(context, (pa_context_notify_cb_t)pa_context_notify, this); // Connect the context //printf("Connecting context\n"); pa_context_connect(context, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL); // Wait for connection while (true) { context_notify.Wait(); if (cstate == PA_CONTEXT_READY) { break; } else if (cstate == PA_CONTEXT_FAILED) { // eww paerror = pa_context_errno(context); pa_context_unref(context); pa_threaded_mainloop_stop(mainloop); pa_threaded_mainloop_free(mainloop); throw agi::AudioPlayerOpenError(std::string("PulseAudio reported error: ") + pa_strerror(paerror), 0); } // otherwise loop once more } //printf("Context connected\n"); // Set up stream bpf = provider->GetChannels() * provider->GetBytesPerSample(); pa_sample_spec ss; ss.format = PA_SAMPLE_S16LE; // FIXME ss.rate = provider->GetSampleRate(); ss.channels = provider->GetChannels(); pa_channel_map map; pa_channel_map_init_auto(&map, ss.channels, PA_CHANNEL_MAP_DEFAULT); //printf("Creating stream\n"); stream = pa_stream_new(context, "Sound", &ss, &map); if (!stream) { // argh! pa_context_disconnect(context); pa_context_unref(context); pa_threaded_mainloop_stop(mainloop); pa_threaded_mainloop_free(mainloop); throw agi::AudioPlayerOpenError("PulseAudio could not create stream", 0); } pa_stream_set_state_callback(stream, (pa_stream_notify_cb_t)pa_stream_notify, this); pa_stream_set_write_callback(stream, (pa_stream_request_cb_t)pa_stream_write, this); // Connect stream //printf("Connecting playback stream\n"); paerror = pa_stream_connect_playback(stream, NULL, NULL, (pa_stream_flags_t)(PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_NOT_MONOTONOUS|PA_STREAM_AUTO_TIMING_UPDATE), NULL, NULL); if (paerror) { printf("PulseAudio reported error: %s (%d)\n", pa_strerror(paerror), paerror); throw agi::AudioPlayerOpenError(std::string("PulseAudio reported error: ") + pa_strerror(paerror), 0); } while (true) { stream_notify.Wait(); if (sstate == PA_STREAM_READY) { break; } else if (sstate == PA_STREAM_FAILED) { paerror = pa_context_errno(context); printf("PulseAudio player: Stream connection failed: %s (%d)\n", pa_strerror(paerror), paerror); throw agi::AudioPlayerOpenError("PulseAudio player: Something went wrong connecting the stream", 0); } } //printf("Connected playback stream, now playing\n\n"); // Hopefully this marks success //printf("Finished opening PulseAudio\n\n"); open = true; }
int softrock_write(float* left_samples,float* right_samples) {
    int rc;
    int i;
    int error;
    float audio_buffer[SAMPLES_PER_BUFFER*2];

    rc=0;
    if(usb2sdr) {
        /* nothing to do here: USB2SDR output is handled via libusb in softrock_read() */
    } else {
        // interleave samples
        for(i=0;i<SAMPLES_PER_BUFFER;i++) {
            audio_buffer[i*2]=right_samples[i];
            audio_buffer[(i*2)+1]=left_samples[i];
        }
        rc = pa_simple_write(playback_stream, audio_buffer, sizeof(audio_buffer), &error);
        if (rc < 0) {
            if (softrock_get_verbose()) fprintf(stderr,"error writing audio_buffer %s (rc=%d)\n", pa_strerror(error), rc);
        }
    }
    return rc;
}
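softrock_write() above never drains the blocking playback stream; a pa_simple playback handle is normally flushed with pa_simple_drain() before being freed on shutdown. A small hedged sketch, with finish_playback as a hypothetical helper rather than anything in the softrock code:

/* Sketch: flush and tear down a blocking pa_simple playback stream. */
#include <stdio.h>
#include <pulse/simple.h>
#include <pulse/error.h>

static void finish_playback(pa_simple *s)
{
    int error = 0;

    /* Block until everything already written has actually been played. */
    if (pa_simple_drain(s, &error) < 0)
        fprintf(stderr, "pa_simple_drain() failed: %s\n", pa_strerror(error));

    pa_simple_free(s);
}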
/* * Callback to be called whenever new data may be written to the * playback data stream */ static void stream_write_callback(pa_stream *stream, size_t length, void *userdata) { struct context *ctx = userdata; struct audio_file *file; struct pa_operation *operation; size_t to_write, write_unit; int ret; assert(ctx); assert(ctx->context); assert((file = ctx->file)); assert(file->buf); assert(file->readi <= file->size); /* Writes must be in multiple of audio sample size * channel count */ write_unit = pa_frame_size(&file->spec); to_write = file->size - file->readi; to_write = min(length, to_write); to_write -= (to_write % write_unit); ret = pa_stream_write(stream, &file->buf[file->readi], to_write, NULL, 0, PA_SEEK_RELATIVE); if (ret < 0) { error("Failed writing audio data to stream: %s", pa_strerror(pa_context_errno(ctx->context))); goto fail; } file->readi += to_write; assert(file->readi <= file->size); /* * EOF! yay .. * * When reaching audio EOF, do not just close the application! * Doing so leads to losing playback of the latest portion of * the audio file (~ 0.5 seconds). Moreover, it produces ugly, * quite loud, sound cracks :-( * * The playback stream needs to be drained first. Thus close * the application, and the PA event loop, only after getting * a confirmation that the stream drain is complete. */ if ((file->size - file->readi) < write_unit) { out("Success! - Reached end of file"); out("Draining playback stream before exit"); /* Don't invoke our write callback again */ pa_stream_set_write_callback(stream, NULL, NULL); operation = pa_stream_drain(stream, stream_drain_complete, ctx); if (!operation) { error("Could not drain playback stream: %s", pa_strerror(pa_context_errno(ctx->context))); goto fail; } } return; fail: quit(ctx, EXIT_FAILURE); }
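The callback above copies pre-decoded file data into the stream with one pa_stream_write() per wakeup, trimmed to whole frames. When data is generated on the fly, the same callback shape can borrow the server's buffer with pa_stream_begin_write(); a hedged sketch that just writes silence (names are illustrative, not from the original):

/* Sketch: a write callback that fills each request with silence, using the
 * buffer handed out by pa_stream_begin_write(). Purely illustrative. */
#include <stddef.h>
#include <string.h>
#include <pulse/stream.h>

static void silence_write_cb(pa_stream *s, size_t nbytes, void *userdata)
{
    (void) userdata;

    while (nbytes > 0) {
        void  *data  = NULL;
        size_t chunk = nbytes;

        /* Borrow a writable buffer of up to 'chunk' bytes from the library. */
        if (pa_stream_begin_write(s, &data, &chunk) < 0 || data == NULL)
            return;
        if (chunk > nbytes)
            chunk = nbytes;            /* never write more than was requested */

        memset(data, 0, chunk);        /* zero bytes == silence for S16/FLOAT formats */
        pa_stream_write(s, data, chunk, NULL, 0, PA_SEEK_RELATIVE);
        nbytes -= chunk;
    }
}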
int softrock_read(float* left_samples,float* right_samples) { int rc=0; int error; int i; float audio_buffer[SAMPLES_PER_BUFFER*2]; int e; unsigned long stat_r, stat_w; static unsigned long error_block[16]; int r; //for return values int actual; //used to find out how many bytes were written volatile int blocks,samps; if(usb2sdr){ r = libusb_bulk_transfer(usb2sdr_handle, (0x81 | LIBUSB_ENDPOINT_IN), (unsigned char*)inRecord, sizeof(inRecord), &actual, 20); if(r == 0 && actual == sizeof(inRecord)){ //printf("R"); } else{ printf("Read Error"); if(r == LIBUSB_ERROR_TIMEOUT) printf("LIBUSB_ERROR_TIMEOUT"); if(r == LIBUSB_ERROR_PIPE) printf("LIBUSB_ERROR_PIPE"); if(r == LIBUSB_ERROR_OVERFLOW) printf("LIBUSB_ERROR_OVERFLOW"); if(r == LIBUSB_ERROR_NO_DEVICE) printf("LIBUSB_ERROR_NO_DEVICE"); } for(blocks=0; blocks<16; blocks++){ //error checking if(inRecord[blocks].ControlFlagValid == 0xAB){ //this is the block that carries the controlinformation if(inRecord[blocks].ReadBadBlocksReportedFromDevice > error_block[blocks]){ error_block[blocks] = inRecord[blocks].ReadBadBlocksReportedFromDevice; printf("--> Cyl:%d Block:%d R_Errors:%ld\n", i, blocks,inRecord[blocks].ReadBadBlocksReportedFromDevice); } //printf("Cyl:%d Block:%d RunCnt:%d CtrlFlag:0x%X Rerrors:%d Werrors:%d\n",i, blocks, inRecord[blocks].RunningCounter,inRecord[blocks].ControlFlagValid,inRecord[blocks].ReadBadBlocksReportedFromDevice,inRecord[blocks].WriteBadBlocksReportedFromDevice); } for(samps=0;samps<32*4;samps+=4){ e=0; e = (int)(inRecord[blocks].LeftChannelIQSamplePack[samps])<<24; e |= (unsigned char)(inRecord[blocks].LeftChannelIQSamplePack[samps+1])<<16; e |= (unsigned char)(inRecord[blocks].LeftChannelIQSamplePack[samps+2])<<8; e |= (unsigned char)(inRecord[blocks].LeftChannelIQSamplePack[samps+3]); WriteToBuf(&rx_l, e/(2147483648.0)); e=0; e = (int)(inRecord[blocks].RightChannelIQSamplePack[samps])<<24; e |= (unsigned char)(inRecord[blocks].RightChannelIQSamplePack[samps+1])<<16; e |= (unsigned char)(inRecord[blocks].RightChannelIQSamplePack[samps+2])<<8; e |= (unsigned char)(inRecord[blocks].RightChannelIQSamplePack[samps+3]); WriteToBuf(&rx_r, e/(2147483648.0)); } } //write record to USB2SDR for(blocks=0; blocks<16; blocks++){ for(samps=0;samps<32*4;samps+=4){ //e = ReadFromBuf(&tx_l); e = (int)(inRecord[blocks].LeftChannelIQSamplePack[samps])<<24; e |= (unsigned char)(inRecord[blocks].LeftChannelIQSamplePack[samps+1])<<16; e |= (unsigned char)(inRecord[blocks].LeftChannelIQSamplePack[samps+2])<<8; e |= (unsigned char)(inRecord[blocks].LeftChannelIQSamplePack[samps+3]); //left_samples[i] = e/(2147483648.0);//works //*p_rx_l = e/(2147483648.0); //p_rx_l++; e=0; e = (int)(inRecord[blocks].RightChannelIQSamplePack[samps])<<24; e |= (unsigned char)(inRecord[blocks].RightChannelIQSamplePack[samps+1])<<16; e |= (unsigned char)(inRecord[blocks].RightChannelIQSamplePack[samps+2])<<8; e |= (unsigned char)(inRecord[blocks].RightChannelIQSamplePack[samps+3]); //right_samples[i] = e/(2147483648.0);//works //*p_rx_l = e/(2147483648.0); //p_rx_l++; //i++;//works } } r = libusb_bulk_transfer(usb2sdr_handle, (2 | LIBUSB_ENDPOINT_OUT), (unsigned char*)outRecord, sizeof(outRecord), &actual, 20); //my device's out endpoint was 2, found with trial- the device had 2 endpoints: 2 and 129 if(r == 0 && actual == sizeof(outRecord)){ //printf("W"); } else{ printf("Write Error"); if(r == LIBUSB_ERROR_TIMEOUT) printf("LIBUSB_ERROR_TIMEOUT"); if(r == LIBUSB_ERROR_PIPE) printf("LIBUSB_ERROR_PIPE"); if(r == LIBUSB_ERROR_OVERFLOW) 
printf("LIBUSB_ERROR_OVERFLOW"); if(r == LIBUSB_ERROR_NO_DEVICE) printf("LIBUSB_ERROR_NO_DEVICE"); } //the second package 512 samples //read r = libusb_bulk_transfer(usb2sdr_handle, (0x81 | LIBUSB_ENDPOINT_IN), (unsigned char*)inRecord, sizeof(inRecord), &actual, 20); if(r == 0 && actual == sizeof(inRecord)){ //printf("R"); } else{ printf("Read Error"); if(r == LIBUSB_ERROR_TIMEOUT) printf("LIBUSB_ERROR_TIMEOUT"); if(r == LIBUSB_ERROR_PIPE) printf("LIBUSB_ERROR_PIPE"); if(r == LIBUSB_ERROR_OVERFLOW) printf("LIBUSB_ERROR_OVERFLOW"); if(r == LIBUSB_ERROR_NO_DEVICE) printf("LIBUSB_ERROR_NO_DEVICE"); } for(blocks=0; blocks<16; blocks++){ //error checking if(inRecord[blocks].ControlFlagValid == 0xAB){ //this is the block that carries the controlinformation if(inRecord[blocks].ReadBadBlocksReportedFromDevice > error_block[blocks]){ error_block[blocks] = inRecord[blocks].ReadBadBlocksReportedFromDevice; printf("--> Cyl:%d Block:%d R_Errors:%ld\n", i, blocks,inRecord[blocks].ReadBadBlocksReportedFromDevice); } //printf("Cyl:%d Block:%d RunCnt:%d CtrlFlag:0x%X Rerrors:%d Werrors:%d\n",i, blocks, inRecord[blocks].RunningCounter,inRecord[blocks].ControlFlagValid,inRecord[blocks].ReadBadBlocksReportedFromDevice,inRecord[blocks].WriteBadBlocksReportedFromDevice); } for(samps=0;samps<32*4;samps+=4){ e=0; e = (int)(inRecord[blocks].LeftChannelIQSamplePack[samps])<<24; e |= (unsigned char)(inRecord[blocks].LeftChannelIQSamplePack[samps+1])<<16; e |= (unsigned char)(inRecord[blocks].LeftChannelIQSamplePack[samps+2])<<8; e |= (unsigned char)(inRecord[blocks].LeftChannelIQSamplePack[samps+3]); WriteToBuf(&rx_l, e/(2147483648.0)); e=0; e = (int)(inRecord[blocks].RightChannelIQSamplePack[samps])<<24; e |= (unsigned char)(inRecord[blocks].RightChannelIQSamplePack[samps+1])<<16; e |= (unsigned char)(inRecord[blocks].RightChannelIQSamplePack[samps+2])<<8; e |= (unsigned char)(inRecord[blocks].RightChannelIQSamplePack[samps+3]); WriteToBuf(&rx_r, e/(2147483648.0)); } } //write record to USB2SDR r = libusb_bulk_transfer(usb2sdr_handle, (2 | LIBUSB_ENDPOINT_OUT), (unsigned char*)outRecord, sizeof(outRecord), &actual, 20); //my device's out endpoint was 2, found with trial- the device had 2 endpoints: 2 and 129 if(r == 0 && actual == sizeof(outRecord)){ //printf("W"); } else{ printf("Write Error"); if(r == LIBUSB_ERROR_TIMEOUT) printf("LIBUSB_ERROR_TIMEOUT"); if(r == LIBUSB_ERROR_PIPE) printf("LIBUSB_ERROR_PIPE"); if(r == LIBUSB_ERROR_OVERFLOW) printf("LIBUSB_ERROR_OVERFLOW"); if(r == LIBUSB_ERROR_NO_DEVICE) printf("LIBUSB_ERROR_NO_DEVICE"); } for(i=0;i<SAMPLES_PER_BUFFER;i++) { left_samples[i]=ReadFromBuf(&rx_l); right_samples[i]=ReadFromBuf(&rx_r); } } else{ if(softrock_get_playback()) { softrock_playback_buffer((char *)audio_buffer,sizeof(audio_buffer)); } else { //if (softrock_get_verbose()) fprintf(stderr,"read available=%ld\n",Pa_GetStreamReadAvailable(stream)); //ftime(&start_time); rc=pa_simple_read(stream,&audio_buffer[0],sizeof(audio_buffer),&error); if(rc<0) { if (softrock_get_verbose()) fprintf(stderr,"error reading audio_buffer %s (rc=%d)\n", pa_strerror(error),rc); } //ftime(&end_time); //if (softrock_get_verbose()) fprintf(stderr,"read %d bytes in %ld ms\n",sizeof(audio_buffer),((end_time.time*1000)+end_time.millitm)-((start_time.time*1000)+start_time.millitm)); } // record the I/Q samples if(softrock_get_record()) { softrock_record_buffer((char *)audio_buffer,sizeof(audio_buffer)); } // de-interleave samples for(i=0;i<SAMPLES_PER_BUFFER;i++) { if(softrock_get_iq()) { left_samples[i]=audio_buffer[i*2]; 
right_samples[i]=audio_buffer[(i*2)+1]; } else { right_samples[i]=audio_buffer[i*2]; left_samples[i]=audio_buffer[(i*2)+1]; } if(timing) { sample_count++; if(sample_count==softrock_get_sample_rate()) { ftime(&end_time); if (softrock_get_verbose()) fprintf(stderr,"%d samples in %ld ms\n",sample_count,((end_time.time*1000)+end_time.millitm)-((start_time.time*1000)+start_time.millitm)); sample_count=0; ftime(&start_time); } } } }//not usb2sdr return rc; }
static gboolean gst_pulsesrc_prepare (GstAudioSrc * asrc, GstRingBufferSpec * spec) { pa_buffer_attr wanted; const pa_buffer_attr *actual; GstPulseSrc *pulsesrc = GST_PULSESRC_CAST (asrc); pa_threaded_mainloop_lock (pulsesrc->mainloop); wanted.maxlength = -1; wanted.tlength = -1; wanted.prebuf = 0; wanted.minreq = -1; wanted.fragsize = spec->segsize; GST_INFO_OBJECT (pulsesrc, "maxlength: %d", wanted.maxlength); GST_INFO_OBJECT (pulsesrc, "tlength: %d", wanted.tlength); GST_INFO_OBJECT (pulsesrc, "prebuf: %d", wanted.prebuf); GST_INFO_OBJECT (pulsesrc, "minreq: %d", wanted.minreq); GST_INFO_OBJECT (pulsesrc, "fragsize: %d", wanted.fragsize); if (pa_stream_connect_record (pulsesrc->stream, pulsesrc->device, &wanted, PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_NOT_MONOTONOUS | #ifdef HAVE_PULSE_0_9_11 PA_STREAM_ADJUST_LATENCY | #endif PA_STREAM_START_CORKED) < 0) { GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("Failed to connect stream: %s", pa_strerror (pa_context_errno (pulsesrc->context))), (NULL)); goto unlock_and_fail; } pulsesrc->corked = TRUE; for (;;) { pa_stream_state_t state; state = pa_stream_get_state (pulsesrc->stream); if (!PA_STREAM_IS_GOOD (state)) { GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED, ("Failed to connect stream: %s", pa_strerror (pa_context_errno (pulsesrc->context))), (NULL)); goto unlock_and_fail; } if (state == PA_STREAM_READY) break; /* Wait until the stream is ready */ pa_threaded_mainloop_wait (pulsesrc->mainloop); } /* get the actual buffering properties now */ actual = pa_stream_get_buffer_attr (pulsesrc->stream); GST_INFO_OBJECT (pulsesrc, "maxlength: %d", actual->maxlength); GST_INFO_OBJECT (pulsesrc, "tlength: %d (wanted: %d)", actual->tlength, wanted.tlength); GST_INFO_OBJECT (pulsesrc, "prebuf: %d", actual->prebuf); GST_INFO_OBJECT (pulsesrc, "minreq: %d (wanted %d)", actual->minreq, wanted.minreq); GST_INFO_OBJECT (pulsesrc, "fragsize: %d (wanted %d)", actual->fragsize, wanted.fragsize); if (actual->fragsize >= wanted.fragsize) { spec->segsize = actual->fragsize; } else { spec->segsize = actual->fragsize * (wanted.fragsize / actual->fragsize); } spec->segtotal = actual->maxlength / spec->segsize; pa_threaded_mainloop_unlock (pulsesrc->mainloop); return TRUE; unlock_and_fail: { gst_pulsesrc_destroy_stream (pulsesrc); pa_threaded_mainloop_unlock (pulsesrc->mainloop); return FALSE; } }
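gst_pulsesrc_prepare() above requests only fragsize and leaves the remaining buffer attributes to the server, then reads the granted values back with pa_stream_get_buffer_attr() once the stream is ready. A compact sketch of the request side for a capture stream, assuming the stream and context already exist and the mainloop lock is held (connect_record_with_fragsize is a made-up name):

#include <stdint.h>
#include <pulse/stream.h>

/* Sketch: connect a capture stream asking only for a specific fragment size;
 * the attributes the server actually granted can be read back with
 * pa_stream_get_buffer_attr() once the stream reaches PA_STREAM_READY. */
static int connect_record_with_fragsize(pa_stream *s, const char *device, uint32_t fragsize)
{
    pa_buffer_attr want;

    want.maxlength = (uint32_t) -1;   /* server default */
    want.tlength   = (uint32_t) -1;   /* playback-only fields, ignored for capture */
    want.prebuf    = (uint32_t) -1;
    want.minreq    = (uint32_t) -1;
    want.fragsize  = fragsize;        /* roughly how many bytes each wakeup delivers */

    return pa_stream_connect_record(s, device, &want,
                                    PA_STREAM_INTERPOLATE_TIMING |
                                    PA_STREAM_AUTO_TIMING_UPDATE |
                                    PA_STREAM_ADJUST_LATENCY);
}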
int main(int argc, char *argv[]) { pa_mainloop* m = NULL; int ret = 1, c; char *bn, *server = NULL; pa_time_event *time_event = NULL; const char *filename = NULL; /* type for pa_read/_write. passed as userdata to the callbacks */ unsigned long type = 0; static const struct option long_options[] = { {"record", 0, NULL, 'r'}, {"playback", 0, NULL, 'p'}, {"device", 1, NULL, 'd'}, {"server", 1, NULL, 's'}, {"client-name", 1, NULL, 'n'}, {"stream-name", 1, NULL, ARG_STREAM_NAME}, {"version", 0, NULL, ARG_VERSION}, {"help", 0, NULL, 'h'}, {"verbose", 0, NULL, 'v'}, {"volume", 1, NULL, ARG_VOLUME}, {"rate", 1, NULL, ARG_SAMPLERATE}, {"format", 1, NULL, ARG_SAMPLEFORMAT}, {"channels", 1, NULL, ARG_CHANNELS}, {"channel-map", 1, NULL, ARG_CHANNELMAP}, {"fix-format", 0, NULL, ARG_FIX_FORMAT}, {"fix-rate", 0, NULL, ARG_FIX_RATE}, {"fix-channels", 0, NULL, ARG_FIX_CHANNELS}, {"no-remap", 0, NULL, ARG_NO_REMAP}, {"no-remix", 0, NULL, ARG_NO_REMIX}, {"latency", 1, NULL, ARG_LATENCY}, {"process-time", 1, NULL, ARG_PROCESS_TIME}, {"property", 1, NULL, ARG_PROPERTY}, {"raw", 0, NULL, ARG_RAW}, {"passthrough", 0, NULL, ARG_PASSTHROUGH}, {"file-format", 2, NULL, ARG_FILE_FORMAT}, {"list-file-formats", 0, NULL, ARG_LIST_FILE_FORMATS}, {"latency-msec", 1, NULL, ARG_LATENCY_MSEC}, {"process-time-msec", 1, NULL, ARG_PROCESS_TIME_MSEC}, {NULL, 0, NULL, 0} }; setlocale(LC_ALL, ""); #ifdef ENABLE_NLS bindtextdomain(GETTEXT_PACKAGE, PULSE_LOCALEDIR); #endif bn = pa_path_get_filename(argv[0]); if (strstr(bn, "play")) { mode = PLAYBACK; raw = FALSE; } else if (strstr(bn, "record")) { mode = RECORD; raw = FALSE; } else if (strstr(bn, "cat")) { mode = PLAYBACK; raw = TRUE; } if (strstr(bn, "rec") || strstr(bn, "mon")) { mode = RECORD; raw = TRUE; } proplist = pa_proplist_new(); while ((c = getopt_long(argc, argv, "rpd:s:n:hv", long_options, NULL)) != -1) { switch (c) { case 'h' : help(bn); ret = 0; goto quit; case ARG_VERSION: printf(_("pacat %s\n" "Compiled with libpulse %s\n" "Linked with libpulse %s\n"), PACKAGE_VERSION, pa_get_headers_version(), pa_get_library_version()); ret = 0; goto quit; case 'r': mode = RECORD; break; case 'p': mode = PLAYBACK; break; case 'd': pa_xfree(device); device = pa_xstrdup(optarg); break; case 's': pa_xfree(server); server = pa_xstrdup(optarg); break; case 'n': { char *t; if (!(t = pa_locale_to_utf8(optarg)) || pa_proplist_sets(proplist, PA_PROP_APPLICATION_NAME, t) < 0) { pa_log(_("Invalid client name '%s'"), t ? t : optarg); pa_xfree(t); goto quit; } pa_xfree(t); break; } case ARG_STREAM_NAME: { char *t; if (!(t = pa_locale_to_utf8(optarg)) || pa_proplist_sets(proplist, PA_PROP_MEDIA_NAME, t) < 0) { pa_log(_("Invalid stream name '%s'"), t ? t : optarg); pa_xfree(t); goto quit; } pa_xfree(t); break; } case 'v': verbose = 1; break; case ARG_VOLUME: { int v = atoi(optarg); volume = v < 0 ? 
0U : (pa_volume_t) v; volume_is_set = TRUE; break; } case ARG_CHANNELS: sample_spec.channels = (uint8_t) atoi(optarg); sample_spec_set = TRUE; break; case ARG_SAMPLEFORMAT: sample_spec.format = pa_parse_sample_format(optarg); sample_spec_set = TRUE; break; case ARG_SAMPLERATE: sample_spec.rate = (uint32_t) atoi(optarg); sample_spec_set = TRUE; break; case ARG_CHANNELMAP: if (!pa_channel_map_parse(&channel_map, optarg)) { pa_log(_("Invalid channel map '%s'"), optarg); goto quit; } channel_map_set = TRUE; break; case ARG_FIX_CHANNELS: flags |= PA_STREAM_FIX_CHANNELS; break; case ARG_FIX_RATE: flags |= PA_STREAM_FIX_RATE; break; case ARG_FIX_FORMAT: flags |= PA_STREAM_FIX_FORMAT; break; case ARG_NO_REMIX: flags |= PA_STREAM_NO_REMIX_CHANNELS; break; case ARG_NO_REMAP: flags |= PA_STREAM_NO_REMAP_CHANNELS; break; case ARG_LATENCY: if (((latency = (size_t) atoi(optarg))) <= 0) { pa_log(_("Invalid latency specification '%s'"), optarg); goto quit; } break; case ARG_PROCESS_TIME: if (((process_time = (size_t) atoi(optarg))) <= 0) { pa_log(_("Invalid process time specification '%s'"), optarg); goto quit; } break; case ARG_LATENCY_MSEC: if (((latency_msec = (int32_t) atoi(optarg))) <= 0) { pa_log(_("Invalid latency specification '%s'"), optarg); goto quit; } break; case ARG_PROCESS_TIME_MSEC: if (((process_time_msec = (int32_t) atoi(optarg))) <= 0) { pa_log(_("Invalid process time specification '%s'"), optarg); goto quit; } break; case ARG_PROPERTY: { char *t; if (!(t = pa_locale_to_utf8(optarg)) || pa_proplist_setp(proplist, t) < 0) { pa_xfree(t); pa_log(_("Invalid property '%s'"), optarg); goto quit; } pa_xfree(t); break; } case ARG_RAW: raw = TRUE; break; case ARG_PASSTHROUGH: flags |= PA_STREAM_PASSTHROUGH; break; case ARG_FILE_FORMAT: if (optarg) { if ((file_format = pa_sndfile_format_from_string(optarg)) < 0) { pa_log(_("Unknown file format %s."), optarg); goto quit; } } raw = FALSE; break; case ARG_LIST_FILE_FORMATS: pa_sndfile_dump_formats(); ret = 0; goto quit; default: goto quit; } } if (!pa_sample_spec_valid(&sample_spec)) { pa_log(_("Invalid sample specification")); goto quit; } if (optind+1 == argc) { int fd; filename = argv[optind]; if ((fd = pa_open_cloexec(argv[optind], mode == PLAYBACK ? O_RDONLY : O_WRONLY|O_TRUNC|O_CREAT, 0666)) < 0) { pa_log(_("open(): %s"), strerror(errno)); goto quit; } if (dup2(fd, mode == PLAYBACK ? STDIN_FILENO : STDOUT_FILENO) < 0) { pa_log(_("dup2(): %s"), strerror(errno)); goto quit; } pa_close(fd); } else if (optind+1 <= argc) { pa_log(_("Too many arguments.")); goto quit; } if (!raw) { SF_INFO sfi; pa_zero(sfi); if (mode == RECORD) { /* This might patch up the sample spec */ if (pa_sndfile_write_sample_spec(&sfi, &sample_spec) < 0) { pa_log(_("Failed to generate sample specification for file.")); goto quit; } if (file_format <= 0) { char *extension; if (filename && (extension = strrchr(filename, '.'))) file_format = pa_sndfile_format_from_string(extension+1); if (file_format <= 0) file_format = SF_FORMAT_WAV; /* Transparently upgrade classic .wav to wavex for multichannel audio */ if (file_format == SF_FORMAT_WAV && (sample_spec.channels > 2 || (channel_map_set && !(sample_spec.channels == 1 && channel_map.map[0] == PA_CHANNEL_POSITION_MONO) && !(sample_spec.channels == 2 && channel_map.map[0] == PA_CHANNEL_POSITION_LEFT && channel_map.map[1] == PA_CHANNEL_POSITION_RIGHT)))) file_format = SF_FORMAT_WAVEX; } sfi.format |= file_format; } if (!(sndfile = sf_open_fd(mode == RECORD ? STDOUT_FILENO : STDIN_FILENO, mode == RECORD ? 
SFM_WRITE : SFM_READ, &sfi, 0))) { pa_log(_("Failed to open audio file.")); goto quit; } if (mode == PLAYBACK) { if (sample_spec_set) pa_log(_("Warning: specified sample specification will be overwritten with specification from file.")); if (pa_sndfile_read_sample_spec(sndfile, &sample_spec) < 0) { pa_log(_("Failed to determine sample specification from file.")); goto quit; } sample_spec_set = TRUE; if (!channel_map_set) { /* Allow the user to overwrite the channel map on the command line */ if (pa_sndfile_read_channel_map(sndfile, &channel_map) < 0) { if (sample_spec.channels > 2) pa_log(_("Warning: Failed to determine channel map from file.")); } else channel_map_set = TRUE; } } } if (!channel_map_set) pa_channel_map_init_extend(&channel_map, sample_spec.channels, PA_CHANNEL_MAP_DEFAULT); if (!pa_channel_map_compatible(&channel_map, &sample_spec)) { pa_log(_("Channel map doesn't match sample specification")); goto quit; } if (!raw) { pa_proplist *sfp; if (mode == PLAYBACK) readf_function = pa_sndfile_readf_function(&sample_spec); else { if (pa_sndfile_write_channel_map(sndfile, &channel_map) < 0) pa_log(_("Warning: failed to write channel map to file.")); writef_function = pa_sndfile_writef_function(&sample_spec); } /* Fill in libsndfile prop list data */ sfp = pa_proplist_new(); pa_sndfile_init_proplist(sndfile, sfp); pa_proplist_update(proplist, PA_UPDATE_MERGE, sfp); pa_proplist_free(sfp); } if (verbose) { char tss[PA_SAMPLE_SPEC_SNPRINT_MAX], tcm[PA_CHANNEL_MAP_SNPRINT_MAX]; pa_log(_("Opening a %s stream with sample specification '%s' and channel map '%s'."), mode == RECORD ? _("recording") : _("playback"), pa_sample_spec_snprint(tss, sizeof(tss), &sample_spec), pa_channel_map_snprint(tcm, sizeof(tcm), &channel_map)); } /* Fill in client name if none was set */ if (!pa_proplist_contains(proplist, PA_PROP_APPLICATION_NAME)) { char *t; if ((t = pa_locale_to_utf8(bn))) { pa_proplist_sets(proplist, PA_PROP_APPLICATION_NAME, t); pa_xfree(t); } } /* Fill in media name if none was set */ if (!pa_proplist_contains(proplist, PA_PROP_MEDIA_NAME)) { const char *t; if ((t = filename) || (t = pa_proplist_gets(proplist, PA_PROP_APPLICATION_NAME))) pa_proplist_sets(proplist, PA_PROP_MEDIA_NAME, t); if (!pa_proplist_contains(proplist, PA_PROP_MEDIA_NAME)) { pa_log(_("Failed to set media name.")); goto quit; } } /* Set up a new main loop */ if (!(m = pa_mainloop_new())) { pa_log(_("pa_mainloop_new() failed.")); goto quit; } mainloop_api = pa_mainloop_get_api(m); pa_assert_se(pa_signal_init(mainloop_api) == 0); pa_signal_new(SIGINT, exit_signal_callback, NULL); pa_signal_new(SIGTERM, exit_signal_callback, NULL); #ifdef SIGUSR1 pa_signal_new(SIGUSR1, sigusr1_signal_callback, NULL); #endif pa_disable_sigpipe(); if (raw) { #ifdef OS_IS_WIN32 /* need to turn on binary mode for stdio io. Windows, meh */ setmode(mode == PLAYBACK ? STDIN_FILENO : STDOUT_FILENO, O_BINARY); #endif if (!(stdio_event = mainloop_api->io_new(mainloop_api, mode == PLAYBACK ? STDIN_FILENO : STDOUT_FILENO, mode == PLAYBACK ? PA_IO_EVENT_INPUT : PA_IO_EVENT_OUTPUT, mode == PLAYBACK ? 
stdin_callback : stdout_callback, &type))) { pa_log(_("io_new() failed.")); goto quit; } } /* Create a new connection context */ if (!(context = pa_context_new_with_proplist(mainloop_api, NULL, proplist))) { pa_log(_("pa_context_new() failed.")); goto quit; } pa_context_set_state_callback(context, context_state_callback, NULL); /* Connect the context */ if (pa_context_connect(context, server, 0, NULL) < 0) { pa_log(_("pa_context_connect() failed: %s"), pa_strerror(pa_context_errno(context))); goto quit; } if (verbose) { if (!(time_event = pa_context_rttime_new(context, pa_rtclock_now() + TIME_EVENT_USEC, time_event_callback, NULL))) { pa_log(_("pa_context_rttime_new() failed.")); goto quit; } } /* Run the main loop */ if (pa_mainloop_run(m, &ret) < 0) { pa_log(_("pa_mainloop_run() failed.")); goto quit; } quit: if (stream) pa_stream_unref(stream); if (context) pa_context_unref(context); if (stdio_event) { pa_assert(mainloop_api); mainloop_api->io_free(stdio_event); } if (time_event) { pa_assert(mainloop_api); mainloop_api->time_free(time_event); } if (m) { pa_signal_done(); pa_mainloop_free(m); } pa_xfree(silence_buffer); pa_xfree(buffer); pa_xfree(server); pa_xfree(device); if (sndfile) sf_close(sndfile); if (proplist) pa_proplist_free(proplist); return ret; }
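pacat above drives everything from a plain, non-threaded pa_mainloop and funnels all cleanup through a single quit label. A stripped-down sketch of that skeleton with the stream logic omitted (ctx_state_cb and the client name are placeholders, not taken from pacat):

/* Sketch: the bare pa_mainloop skeleton that pacat builds on. Stream setup
 * would normally happen in ctx_state_cb() once the context is READY. */
#include <stdio.h>
#include <pulse/pulseaudio.h>

static void ctx_state_cb(pa_context *c, void *userdata)
{
    pa_mainloop_api *api = (pa_mainloop_api *) userdata;

    switch (pa_context_get_state(c)) {
    case PA_CONTEXT_READY:
        /* create and connect streams here */
        break;
    case PA_CONTEXT_TERMINATED:
    case PA_CONTEXT_FAILED:
        api->quit(api, 1);            /* make pa_mainloop_run() return */
        break;
    default:
        break;
    }
}

int main(void)
{
    int ret = 1;
    pa_mainloop *m = pa_mainloop_new();
    pa_mainloop_api *api = m ? pa_mainloop_get_api(m) : NULL;
    pa_context *ctx = api ? pa_context_new(api, "skeleton") : NULL;

    if (!ctx) {
        fprintf(stderr, "failed to set up mainloop/context\n");
        goto quit;
    }

    pa_context_set_state_callback(ctx, ctx_state_cb, api);
    if (pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL) < 0) {
        fprintf(stderr, "pa_context_connect() failed: %s\n",
                pa_strerror(pa_context_errno(ctx)));
        goto quit;
    }

    pa_mainloop_run(m, &ret);         /* blocks until api->quit() is called */

quit:
    if (ctx)
        pa_context_unref(ctx);
    if (m)
        pa_mainloop_free(m);
    return ret;
}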