/* Create a playback stream on an established context, connect it with the
 * given attributes/flags, and block (via the threaded mainloop) until it
 * reaches PA_STREAM_READY.  The temporary state callback is removed before
 * returning.  Returns the ready stream, or NULL on any failure. */
static pa_stream *connect_playback_stream(const char *device_name, pa_threaded_mainloop *loop, pa_context *context, pa_stream_flags_t flags, pa_buffer_attr *attr, pa_sample_spec *spec, pa_channel_map *chanmap)
{
    pa_stream *strm = pa_stream_new_with_proplist(context, "Playback Stream", spec, chanmap, prop_filter);
    if(!strm)
    {
        ERR("pa_stream_new_with_proplist() failed: %s\n", pa_strerror(pa_context_errno(context)));
        return NULL;
    }

    /* The state callback signals the mainloop so the wait below wakes. */
    pa_stream_set_state_callback(strm, stream_state_callback, loop);

    if(pa_stream_connect_playback(strm, device_name, attr, flags, NULL, NULL) < 0)
    {
        ERR("Stream did not connect: %s\n", pa_strerror(pa_context_errno(context)));
        goto fail;
    }

    for(;;)
    {
        pa_stream_state_t st = pa_stream_get_state(strm);
        if(st == PA_STREAM_READY)
            break;
        if(!PA_STREAM_IS_GOOD(st))
        {
            ERR("Stream did not get ready: %s\n", pa_strerror(pa_context_errno(context)));
            goto fail;
        }
        pa_threaded_mainloop_wait(loop);
    }

    /* Done waiting; drop the temporary state callback. */
    pa_stream_set_state_callback(strm, NULL, NULL);
    return strm;

fail:
    pa_stream_unref(strm);
    return NULL;
}
/* Reconfigure the PulseAudio playback stream to match the ALC device's
 * current format/period settings.  (Re)creates data->stream and updates
 * device->UpdateSize/NumUpdates to whatever buffer metrics the server
 * actually granted.  Returns ALC_TRUE on success.
 * NOTE(review): the ppa_* names appear to be dynamically loaded pa_*
 * entry points — confirm against the loader code elsewhere in this file. */
static ALCboolean pulse_reset_playback(ALCdevice *device) //{{{
{
    pulse_data *data = device->ExtraData;
    pa_stream_state_t state;

    ppa_threaded_mainloop_lock(data->loop);

    /* One period (minreq) = one hardware update; tlength covers all periods. */
    data->frame_size = aluBytesFromFormat(device->Format) * aluChannelsFromFormat(device->Format);
    data->attr.minreq = data->frame_size * device->UpdateSize;
    data->attr.prebuf = -1;      /* (uint32_t)-1 = let the server choose */
    data->attr.maxlength = -1;
    data->attr.fragsize = -1;    /* playback stream; fragsize is capture-only */
    data->attr.tlength = data->attr.minreq * device->NumUpdates;
    data->stream_name = "Playback Stream";

    /* Map the ALC sample size onto a PulseAudio sample format. */
    switch(aluBytesFromFormat(device->Format))
    {
        case 1: data->spec.format = PA_SAMPLE_U8; break;
        case 2: data->spec.format = PA_SAMPLE_S16NE; break;
        case 4: data->spec.format = PA_SAMPLE_FLOAT32NE; break;
        default:
            AL_PRINT("Unknown format: 0x%x\n", device->Format);
            ppa_threaded_mainloop_unlock(data->loop);
            return ALC_FALSE;
    }
    data->spec.rate = device->Frequency;
    data->spec.channels = aluChannelsFromFormat(device->Format);

    if(ppa_sample_spec_valid(&data->spec) == 0)
    {
        AL_PRINT("Invalid sample format\n");
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    /* NULL channel map: accept the server default for this channel count. */
    data->stream = ppa_stream_new(data->context, data->stream_name, &data->spec, NULL);
    if(!data->stream)
    {
        AL_PRINT("pa_stream_new() failed: %s\n",
                 ppa_strerror(ppa_context_errno(data->context)));
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    if(ppa_stream_connect_playback(data->stream, NULL, &data->attr,
                                   PA_STREAM_ADJUST_LATENCY, NULL, NULL) < 0)
    {
        AL_PRINT("Stream did not connect: %s\n",
                 ppa_strerror(ppa_context_errno(data->context)));
        ppa_stream_unref(data->stream);
        data->stream = NULL;
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    /* No state callback is installed on this stream, so
     * pa_threaded_mainloop_wait() would never be signalled; instead poll
     * the state with the lock dropped and a 1ms sleep between probes. */
    while((state=ppa_stream_get_state(data->stream)) != PA_STREAM_READY)
    {
        if(!PA_STREAM_IS_GOOD(state))
        {
            AL_PRINT("Stream did not get ready: %s\n",
                     ppa_strerror(ppa_context_errno(data->context)));
            ppa_stream_unref(data->stream);
            data->stream = NULL;
            ppa_threaded_mainloop_unlock(data->loop);
            return ALC_FALSE;
        }
        ppa_threaded_mainloop_unlock(data->loop);
        Sleep(1);
        ppa_threaded_mainloop_lock(data->loop);
    }

    /* Adopt the server's actual buffer metrics and propagate them back to
     * the ALC device so the mixer produces matching update sizes. */
    data->attr = *(ppa_stream_get_buffer_attr(data->stream));
    if((data->attr.tlength%data->attr.minreq) != 0)
        AL_PRINT("tlength (%d) is not a multiple of minreq (%d)!\n",
                 data->attr.tlength, data->attr.minreq);
    device->UpdateSize = data->attr.minreq;
    device->NumUpdates = data->attr.tlength/data->attr.minreq;

    ppa_stream_set_write_callback(data->stream, stream_write_callback, device);
    ppa_threaded_mainloop_unlock(data->loop);
    return ALC_TRUE;
} //}}}
void SetupSound (void) { int error_number; // Acquire mainloop /////////////////////////////////////////////////////// device.mainloop = pa_threaded_mainloop_new (); if (device.mainloop == NULL) { fprintf (stderr, "Could not acquire PulseAudio main loop\n"); return; } // Acquire context //////////////////////////////////////////////////////// device.api = pa_threaded_mainloop_get_api (device.mainloop); device.context = pa_context_new (device.api, "PCSXR"); pa_context_set_state_callback (device.context, context_state_cb, &device); if (device.context == NULL) { fprintf (stderr, "Could not acquire PulseAudio device context\n"); return; } // Connect to PulseAudio server /////////////////////////////////////////// if (pa_context_connect (device.context, NULL, 0, NULL) < 0) { error_number = pa_context_errno (device.context); fprintf (stderr, "Could not connect to PulseAudio server: %s\n", pa_strerror(error_number)); return; } // Run mainloop until sever context is ready ////////////////////////////// pa_threaded_mainloop_lock (device.mainloop); if (pa_threaded_mainloop_start (device.mainloop) < 0) { fprintf (stderr, "Could not start mainloop\n"); return; } pa_context_state_t context_state; context_state = pa_context_get_state (device.context); while (context_state != PA_CONTEXT_READY) { context_state = pa_context_get_state (device.context); if (! 
PA_CONTEXT_IS_GOOD (context_state)) { error_number = pa_context_errno (device.context); fprintf (stderr, "Context state is not good: %s\n", pa_strerror (error_number)); return; } else if (context_state == PA_CONTEXT_READY) break; else fprintf (stderr, "PulseAudio context state is %d\n", context_state); pa_threaded_mainloop_wait (device.mainloop); } // Set sample spec //////////////////////////////////////////////////////// device.spec.format = PA_SAMPLE_S16NE; if (iDisStereo) device.spec.channels = 1; else device.spec.channels = 2; device.spec.rate = settings.frequency; pa_buffer_attr buffer_attributes; buffer_attributes.tlength = pa_bytes_per_second (& device.spec) / 5; buffer_attributes.maxlength = buffer_attributes.tlength * 3; buffer_attributes.minreq = buffer_attributes.tlength / 3; buffer_attributes.prebuf = buffer_attributes.tlength; //maxlength = buffer_attributes.maxlength; //fprintf (stderr, "Total space: %u\n", buffer_attributes.maxlength); //fprintf (stderr, "Minimum request size: %u\n", buffer_attributes.minreq); //fprintf (stderr, "Bytes needed before playback: %u\n", buffer_attributes.prebuf); //fprintf (stderr, "Target buffer size: %lu\n", buffer_attributes.tlength); // Acquire new stream using spec ////////////////////////////////////////// device.stream = pa_stream_new (device.context, "PCSXR", &device.spec, NULL); if (device.stream == NULL) { error_number = pa_context_errno (device.context); fprintf (stderr, "Could not acquire new PulseAudio stream: %s\n", pa_strerror (error_number)); return; } // Set callbacks for server events //////////////////////////////////////// pa_stream_set_state_callback (device.stream, stream_state_cb, &device); pa_stream_set_write_callback (device.stream, stream_request_cb, &device); pa_stream_set_latency_update_callback (device.stream, stream_latency_update_cb, &device); // Ready stream for playback ////////////////////////////////////////////// pa_stream_flags_t flags = (pa_stream_flags_t) (PA_STREAM_ADJUST_LATENCY 
| PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE); //pa_stream_flags_t flags = (pa_stream_flags_t) (PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_EARLY_REQUESTS); if (pa_stream_connect_playback (device.stream, NULL, &buffer_attributes, flags, NULL, NULL) < 0) { pa_context_errno (device.context); fprintf (stderr, "Could not connect for playback: %s\n", pa_strerror (error_number)); return; } // Run mainloop until stream is ready ///////////////////////////////////// pa_stream_state_t stream_state; stream_state = pa_stream_get_state (device.stream); while (stream_state != PA_STREAM_READY) { stream_state = pa_stream_get_state (device.stream); if (stream_state == PA_STREAM_READY) break; else if (! PA_STREAM_IS_GOOD (stream_state)) { error_number = pa_context_errno (device.context); fprintf (stderr, "Stream state is not good: %s\n", pa_strerror (error_number)); return; } else fprintf (stderr, "PulseAudio stream state is %d\n", stream_state); pa_threaded_mainloop_wait (device.mainloop); } pa_threaded_mainloop_unlock (device.mainloop); fprintf (stderr, "PulseAudio should be connected\n"); return; }
/* Open a PulseAudio capture (record) stream for audio input redirection.
 * Requires an established context and a negotiated sample rate; does
 * nothing if a stream already exists.  On success allocates the packet
 * buffer and stores the receive callback; on failure falls back to
 * audin_pulse_close() after the wait loop, or returns early with the
 * stream left in place (see NOTE below). */
static void audin_pulse_open(IAudinDevice* device, AudinReceive receive, void* user_data)
{
    pa_stream_state_t state;
    pa_buffer_attr buffer_attr = { 0 };
    AudinPulseDevice* pulse = (AudinPulseDevice*) device;

    if (!pulse->context)
        return;
    if (!pulse->sample_spec.rate || pulse->stream)
        return;

    DEBUG_DVC("");

    pulse->receive = receive;
    pulse->user_data = user_data;

    pa_threaded_mainloop_lock(pulse->mainloop);
    pulse->stream = pa_stream_new(pulse->context, "freerdp_audin",
        &pulse->sample_spec, NULL);
    if (!pulse->stream)
    {
        pa_threaded_mainloop_unlock(pulse->mainloop);
        DEBUG_DVC("pa_stream_new failed (%d)",
            pa_context_errno(pulse->context));
        return;
    }
    pulse->bytes_per_frame = pa_frame_size(&pulse->sample_spec);
    pa_stream_set_state_callback(pulse->stream,
        audin_pulse_stream_state_callback, pulse);
    pa_stream_set_read_callback(pulse->stream,
        audin_pulse_stream_request_callback, pulse);

    /* (uint32_t)-1 = server default for the playback-side attrs; only the
     * capture fragment size is constrained. */
    buffer_attr.maxlength = (uint32_t) -1;
    buffer_attr.tlength = (uint32_t) -1;
    buffer_attr.prebuf = (uint32_t) -1;
    buffer_attr.minreq = (uint32_t) -1;
    /* 500ms latency */
    buffer_attr.fragsize = pa_usec_to_bytes(500000, &pulse->sample_spec);
    if (pa_stream_connect_record(pulse->stream,
        pulse->device_name[0] ? pulse->device_name : NULL,
        &buffer_attr, PA_STREAM_ADJUST_LATENCY) < 0)
    {
        pa_threaded_mainloop_unlock(pulse->mainloop);
        /* BUG FIX: the message previously named pa_stream_connect_playback,
         * but the failing call is pa_stream_connect_record. */
        DEBUG_WARN("pa_stream_connect_record failed (%d)",
            pa_context_errno(pulse->context));
        /* NOTE(review): pulse->stream is left set on this path, which will
         * make later opens bail out early — confirm whether the close path
         * is expected to reclaim it. */
        return;
    }

    /* Wait (driven by the state callback) until READY or a dead state. */
    for (;;)
    {
        state = pa_stream_get_state(pulse->stream);
        if (state == PA_STREAM_READY)
            break;
        if (!PA_STREAM_IS_GOOD(state))
        {
            DEBUG_WARN("bad stream state (%d)",
                pa_context_errno(pulse->context));
            break;
        }
        pa_threaded_mainloop_wait(pulse->mainloop);
    }
    pa_threaded_mainloop_unlock(pulse->mainloop);

    if (state == PA_STREAM_READY)
    {
        memset(&pulse->adpcm, 0, sizeof(ADPCM));
        pulse->buffer = xzalloc(pulse->bytes_per_frame * pulse->frames_per_packet);
        pulse->buffer_frames = 0;
        DEBUG_DVC("connected");
    }
    else
    {
        audin_pulse_close(device);
    }
}
/* GStreamer 0.10 audiosrc "prepare" vfunc: connect the already-created
 * PulseAudio record stream with buffer attributes derived from the
 * ring-buffer spec, wait for it to become ready, then report the
 * server-chosen segment geometry back through `spec`.
 * Returns TRUE on success, FALSE after tearing down the stream. */
static gboolean
gst_pulsesrc_prepare (GstAudioSrc * asrc, GstRingBufferSpec * spec)
{
  pa_buffer_attr wanted;
  const pa_buffer_attr *actual;
  GstPulseSrc *pulsesrc = GST_PULSESRC_CAST (asrc);

  pa_threaded_mainloop_lock (pulsesrc->mainloop);

  /* (uint32_t)-1 asks the server to pick a default; only the capture
   * fragment size is pinned, to match one ring-buffer segment. */
  wanted.maxlength = -1;
  wanted.tlength = -1;
  wanted.prebuf = 0;
  wanted.minreq = -1;
  wanted.fragsize = spec->segsize;

  GST_INFO_OBJECT (pulsesrc, "maxlength: %d", wanted.maxlength);
  GST_INFO_OBJECT (pulsesrc, "tlength: %d", wanted.tlength);
  GST_INFO_OBJECT (pulsesrc, "prebuf: %d", wanted.prebuf);
  GST_INFO_OBJECT (pulsesrc, "minreq: %d", wanted.minreq);
  GST_INFO_OBJECT (pulsesrc, "fragsize: %d", wanted.fragsize);

  /* Start corked; ADJUST_LATENCY only exists from PulseAudio 0.9.11 on. */
  if (pa_stream_connect_record (pulsesrc->stream, pulsesrc->device, &wanted,
          PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE |
          PA_STREAM_NOT_MONOTONOUS |
#ifdef HAVE_PULSE_0_9_11
          PA_STREAM_ADJUST_LATENCY |
#endif
          PA_STREAM_START_CORKED) < 0) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("Failed to connect stream: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }

  pulsesrc->corked = TRUE;

  for (;;) {
    pa_stream_state_t state;

    state = pa_stream_get_state (pulsesrc->stream);

    if (!PA_STREAM_IS_GOOD (state)) {
      GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
          ("Failed to connect stream: %s",
              pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
      goto unlock_and_fail;
    }

    if (state == PA_STREAM_READY)
      break;

    /* Wait until the stream is ready */
    pa_threaded_mainloop_wait (pulsesrc->mainloop);
  }

  /* get the actual buffering properties now */
  actual = pa_stream_get_buffer_attr (pulsesrc->stream);

  GST_INFO_OBJECT (pulsesrc, "maxlength: %d", actual->maxlength);
  GST_INFO_OBJECT (pulsesrc, "tlength: %d (wanted: %d)", actual->tlength,
      wanted.tlength);
  GST_INFO_OBJECT (pulsesrc, "prebuf: %d", actual->prebuf);
  GST_INFO_OBJECT (pulsesrc, "minreq: %d (wanted %d)", actual->minreq,
      wanted.minreq);
  GST_INFO_OBJECT (pulsesrc, "fragsize: %d (wanted %d)", actual->fragsize,
      wanted.fragsize);

  /* If the server granted a smaller fragment than requested, round the
   * segment size down to a whole multiple of the granted fragment.
   * (Assumes actual->fragsize != 0 — TODO confirm server guarantee.) */
  if (actual->fragsize >= wanted.fragsize) {
    spec->segsize = actual->fragsize;
  } else {
    spec->segsize = actual->fragsize * (wanted.fragsize / actual->fragsize);
  }
  spec->segtotal = actual->maxlength / spec->segsize;

  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

  return TRUE;

unlock_and_fail:
  {
    gst_pulsesrc_destroy_stream (pulsesrc);

    pa_threaded_mainloop_unlock (pulsesrc->mainloop);
    return FALSE;
  }
}
/* Initialize the selected host audio backend (lnxdrv_apimode: 0=SDL,
 * 1=ALSA, 2=OSS, 3=PulseAudio) for 16-bit stereo output at sample_rate.
 * Returns 1 and sets hw_present on success, 0 on any failure. */
INT16 m1sdr_Init(int sample_rate)
{
    int format, stereo, rate, fsize, err, state;
    unsigned int nfreq, periodtime;
    snd_pcm_hw_params_t *hwparams;
#ifdef USE_SDL
    SDL_AudioSpec aspec;
#endif
    pa_channel_map chanmap;
    pa_buffer_attr my_pa_attr;

    hw_present = 0;
    m1sdr_Callback = NULL;
    nDSoundSegLen = sample_rate / 60;   /* one 60Hz tick worth of frames */

    switch (lnxdrv_apimode)
    {
    case 0: // SDL
#ifdef USE_SDL
        SDL_InitSubSystem(SDL_INIT_AUDIO);
        m1sdr_SetSamplesPerTick(sample_rate/60);
        playbuf = 0;
        writebuf = 1;
        aspec.freq = sample_rate;
        aspec.format = AUDIO_S16SYS;    // keep endian independent
        aspec.channels = 2;
        aspec.samples = 512;            // has to be a power of 2, and we want it smaller than our buffer size
        aspec.callback = sdl_callback;
        aspec.userdata = 0;
        if (SDL_OpenAudio(&aspec, NULL) < 0)
        {
            printf("ERROR: can't open SDL audio\n");
            return 0;
        }
        // make sure we don't start yet
        SDL_PauseAudio(1);
#endif
        break;

    case 1: // ALSA
        // Try to open audio device
        if ((err = snd_pcm_open(&pHandle, "default", SND_PCM_STREAM_PLAYBACK, 0)) < 0)
        {
            fprintf(stderr, "ALSA: Could not open soundcard (%s)\n", snd_strerror(err));
            hw_present = 0;
            return 0;
        }
        if ((err = snd_pcm_hw_params_malloc(&hwparams)) < 0)
        {
            fprintf (stderr, "cannot allocate hardware parameter structure (%s)\n", snd_strerror(err));
            return 0;
        }
        // Init hwparams with full configuration space
        if ((err = snd_pcm_hw_params_any(pHandle, hwparams)) < 0)
        {
            fprintf(stderr, "ALSA: couldn't set hw params (%s)\n", snd_strerror(err));
            hw_present = 0;
            return 0;
        }
        // Set access type
        if ((err = snd_pcm_hw_params_set_access(pHandle, hwparams, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0)
        {
            fprintf(stderr, "ALSA: can't set access (%s)\n", snd_strerror(err));
            return 0;
        }
        // Set sample format
        if ((err = snd_pcm_hw_params_set_format(pHandle, hwparams, SND_PCM_FORMAT_S16)) < 0)
        {
            fprintf(stderr, "ALSA: can't set format (%s)\n", snd_strerror(err));
            return 0;
        }
        // Set sample rate (nearest possible)
        nfreq = sample_rate;
        if ((err = snd_pcm_hw_params_set_rate_near(pHandle, hwparams, &nfreq, 0)) < 0)
        {
            fprintf(stderr, "ALSA: can't set sample rate (%s)\n", snd_strerror(err));
            return 0;
        }
        // Set number of channels
        if ((err = snd_pcm_hw_params_set_channels(pHandle, hwparams, 2)) < 0)
        {
            fprintf(stderr, "ALSA: can't set stereo (%s)\n", snd_strerror(err));
            return 0;
        }
        // Set period time (nearest possible)
        periodtime = 16;
        if ((err = snd_pcm_hw_params_set_period_time_near(pHandle, hwparams, &periodtime, 0)) < 0)
        {
            fprintf(stderr, "ALSA: can't set period time (%s)\n", snd_strerror(err));
            return 0;
        }
        // Apply HW parameter settings to PCM device and prepare device
        if ((err = snd_pcm_hw_params(pHandle, hwparams)) < 0)
        {
            fprintf(stderr, "ALSA: unable to install hw_params (%s)\n", snd_strerror(err));
            snd_pcm_hw_params_free(hwparams);
            return 0;
        }
        snd_pcm_hw_params_free(hwparams);
        if ((err = snd_pcm_prepare(pHandle)) < 0)
        {
            fprintf (stderr, "cannot prepare audio interface for use (%s)\n", snd_strerror(err));
            return 0;
        }
        break;

    case 2: // OSS
        audiofd = open("/dev/dsp", O_WRONLY, 0);
        if (audiofd == -1)
        {
            // fall back to the secondary DSP device
            audiofd = open("/dev/dsp1", O_WRONLY, 0);
            if (audiofd == -1)
            {
                perror("/dev/dsp1");
                return(0);
            }
        }
        // reset things
        ioctl(audiofd, SNDCTL_DSP_RESET, 0);
        is_broken_driver = 0;
        num_frags = NUM_FRAGS_NORMAL;
        // set the buffer size we want
        fsize = OSS_FRAGMENT;
        if (ioctl(audiofd, SNDCTL_DSP_SETFRAGMENT, &fsize) == - 1)
        {
            perror("SNDCTL_DSP_SETFRAGMENT");
            return(0);
        }
        // set 16-bit output
        format = AFMT_S16_NE;   // 16 bit signed "native"-endian
        if (ioctl(audiofd, SNDCTL_DSP_SETFMT, &format) == - 1)
        {
            perror("SNDCTL_DSP_SETFMT");
            return(0);
        }
        // now set stereo
        stereo = 1;
        if (ioctl(audiofd, SNDCTL_DSP_STEREO, &stereo) == - 1)
        {
            perror("SNDCTL_DSP_STEREO");
            return(0);
        }
        // and the sample rate
        rate = sample_rate;
        if (ioctl(audiofd, SNDCTL_DSP_SPEED, &rate) == - 1)
        {
            perror("SNDCTL_DSP_SPEED");
            return(0);
        }
        // and make sure that did what we wanted
        ioctl(audiofd, SNDCTL_DSP_GETBLKSIZE, &fsize);
        break;

    case 3: // PulseAudio
        sample_spec.format = PA_SAMPLE_S16NE;
        sample_spec.rate = sample_rate;
        sample_spec.channels = 2;
        my_pa_context = NULL;
        my_pa_stream = NULL;
        my_pa_mainloop = NULL;
        my_pa_mainloop_api = NULL;
#if !PULSE_USE_SIMPLE
        // get default channel mapping
        pa_channel_map_init_auto(&chanmap, sample_spec.channels, PA_CHANNEL_MAP_WAVEEX);
        if (!(my_pa_mainloop = pa_mainloop_new()))
        {
            fprintf(stderr, "pa_mainloop_new() failed\n");
            return 0;
        }
        my_pa_mainloop_api = pa_mainloop_get_api(my_pa_mainloop);
        /* if (pa_signal_init(my_pa_mainloop_api) != 0)
        {
            fprintf(stderr, "pa_signal_init() failed\n");
            return 0;
        }*/
        /* Create a new connection context */
        if (!(my_pa_context = pa_context_new(my_pa_mainloop_api, "Audio Overload")))
        {
            fprintf(stderr, "pa_context_new() failed\n");
            return 0;
        }
        /* set the context state CB */
        // pa_context_set_state_callback(my_pa_context, context_state_callback, NULL);
        /* Connect the context */
        if (pa_context_connect(my_pa_context, NULL, (pa_context_flags_t)0, NULL) < 0)
        {
            fprintf(stderr, "pa_context_connect() failed: %s", pa_strerror(pa_context_errno(my_pa_context)));
            return 0;
        }
        /* Drive the plain (non-threaded) mainloop until the context is
         * ready or enters a failed state. */
        do
        {
            pa_mainloop_iterate(my_pa_mainloop, 1, NULL);
            state = pa_context_get_state(my_pa_context);
            if (!PA_CONTEXT_IS_GOOD((pa_context_state_t)state))
            {
                printf("PA CONTEXT NOT GOOD\n");
                hw_present = 0;
                return 0;
            }
        } while (state != PA_CONTEXT_READY);

        if (!(my_pa_stream = pa_stream_new(my_pa_context, "Audio Overload", &sample_spec, &chanmap)))
        {
            fprintf(stderr, "pa_stream_new() failed: %s\n", pa_strerror(pa_context_errno(my_pa_context)));
            return 0;
        }
        /* 4 bytes per frame (16-bit stereo): target 4 segments of latency
         * and request refills in 2-segment chunks; -1 = server default. */
        memset(&my_pa_attr, 0, sizeof(my_pa_attr));
        my_pa_attr.tlength = nDSoundSegLen * 4 * 4;
        my_pa_attr.prebuf = -1;
        my_pa_attr.maxlength = -1;
        my_pa_attr.minreq = nDSoundSegLen * 4 * 2;
        if ((err = pa_stream_connect_playback(my_pa_stream, NULL, &my_pa_attr, PA_STREAM_ADJUST_LATENCY, NULL, NULL)) < 0)
        {
            fprintf(stderr, "pa_stream_connect_playback() failed: %s\n", pa_strerror(pa_context_errno(my_pa_context)));
            return 0;
        }
        /* Same manual pump until the stream itself is ready. */
        do
        {
            pa_mainloop_iterate(my_pa_mainloop, 1, NULL);
            state = pa_stream_get_state(my_pa_stream);
            if (!PA_STREAM_IS_GOOD((pa_stream_state_t)state))
            {
                printf("PA STREAM NOT GOOD\n");
                hw_present = 0;
                return 0;
            }
        } while (state != PA_STREAM_READY);
        // printf("PulseAudio setup OK so far, len %d\n", nDSoundSegLen*4);
#else
        my_simple = NULL;
#endif
        break;
    }

    hw_present = 1;
    return (1);
}
/* SDL2 PulseAudio backend: open a playback device matching this->spec.
 * Negotiates the closest supported sample format, builds buffer
 * attributes around the mixing buffer size, brings up a mainloop +
 * context + stream (all via dynamically loaded PULSEAUDIO_pa_* symbols),
 * and iterates the mainloop until the stream is ready.
 * Returns 0 on success or an SDL error on failure. */
static int
PULSEAUDIO_OpenDevice(_THIS, const char *devname, int iscapture)
{
    struct SDL_PrivateAudioData *h = NULL;
    Uint16 test_format = 0;
    pa_sample_spec paspec;
    pa_buffer_attr paattr;
    pa_channel_map pacmap;
    pa_stream_flags_t flags = 0;
    int state = 0;

    /* Initialize all variables that we clean on shutdown */
    this->hidden = (struct SDL_PrivateAudioData *)
        SDL_malloc((sizeof *this->hidden));
    if (this->hidden == NULL) {
        return SDL_OutOfMemory();
    }
    SDL_memset(this->hidden, 0, (sizeof *this->hidden));
    h = this->hidden;

    paspec.format = PA_SAMPLE_INVALID;

    /* Try for a closest match on audio format */
    for (test_format = SDL_FirstAudioFormat(this->spec.format);
         (paspec.format == PA_SAMPLE_INVALID) && test_format;) {
#ifdef DEBUG_AUDIO
        fprintf(stderr, "Trying format 0x%4.4x\n", test_format);
#endif
        switch (test_format) {
        case AUDIO_U8:
            paspec.format = PA_SAMPLE_U8;
            break;
        case AUDIO_S16LSB:
            paspec.format = PA_SAMPLE_S16LE;
            break;
        case AUDIO_S16MSB:
            paspec.format = PA_SAMPLE_S16BE;
            break;
        case AUDIO_S32LSB:
            paspec.format = PA_SAMPLE_S32LE;
            break;
        case AUDIO_S32MSB:
            paspec.format = PA_SAMPLE_S32BE;
            break;
        case AUDIO_F32LSB:
            paspec.format = PA_SAMPLE_FLOAT32LE;
            break;
        case AUDIO_F32MSB:
            paspec.format = PA_SAMPLE_FLOAT32BE;
            break;
        default:
            paspec.format = PA_SAMPLE_INVALID;
            break;
        }
        if (paspec.format == PA_SAMPLE_INVALID) {
            test_format = SDL_NextAudioFormat();
        }
    }
    if (paspec.format == PA_SAMPLE_INVALID) {
        PULSEAUDIO_CloseDevice(this);
        return SDL_SetError("Couldn't find any hardware audio formats");
    }
    this->spec.format = test_format;

    /* Calculate the final parameters for this audio specification */
#ifdef PA_STREAM_ADJUST_LATENCY
    this->spec.samples /= 2;    /* Mix in smaller chunck to avoid underruns */
#endif
    SDL_CalculateAudioSpec(&this->spec);

    /* Allocate mixing buffer */
    h->mixlen = this->spec.size;
    h->mixbuf = (Uint8 *) SDL_AllocAudioMem(h->mixlen);
    if (h->mixbuf == NULL) {
        PULSEAUDIO_CloseDevice(this);
        return SDL_OutOfMemory();
    }
    SDL_memset(h->mixbuf, this->spec.silence, this->spec.size);

    paspec.channels = this->spec.channels;
    paspec.rate = this->spec.freq;

    /* Reduced prebuffering compared to the defaults. */
#ifdef PA_STREAM_ADJUST_LATENCY
    /* 2x original requested bufsize */
    paattr.tlength = h->mixlen * 4;
    paattr.prebuf = -1;
    paattr.maxlength = -1;
    /* -1 can lead to pa_stream_writable_size() >= mixlen never being true */
    paattr.minreq = h->mixlen;
    flags = PA_STREAM_ADJUST_LATENCY;
#else
    paattr.tlength = h->mixlen*2;
    paattr.prebuf = h->mixlen*2;
    paattr.maxlength = h->mixlen*2;
    paattr.minreq = h->mixlen;
#endif

    /* The SDL ALSA output hints us that we use Windows' channel mapping */
    /* http://bugzilla.libsdl.org/show_bug.cgi?id=110 */
    PULSEAUDIO_pa_channel_map_init_auto(&pacmap, this->spec.channels,
                                        PA_CHANNEL_MAP_WAVEEX);

    /* Set up a new main loop */
    if (!(h->mainloop = PULSEAUDIO_pa_mainloop_new())) {
        PULSEAUDIO_CloseDevice(this);
        return SDL_SetError("pa_mainloop_new() failed");
    }

    h->mainloop_api = PULSEAUDIO_pa_mainloop_get_api(h->mainloop);
    h->context = PULSEAUDIO_pa_context_new(h->mainloop_api, getAppName());
    if (!h->context) {
        PULSEAUDIO_CloseDevice(this);
        return SDL_SetError("pa_context_new() failed");
    }

    /* Connect to the PulseAudio server */
    if (PULSEAUDIO_pa_context_connect(h->context, NULL, 0, NULL) < 0) {
        PULSEAUDIO_CloseDevice(this);
        return SDL_SetError("Could not setup connection to PulseAudio");
    }

    /* Pump the (non-threaded) mainloop until the context is ready. */
    do {
        if (PULSEAUDIO_pa_mainloop_iterate(h->mainloop, 1, NULL) < 0) {
            PULSEAUDIO_CloseDevice(this);
            return SDL_SetError("pa_mainloop_iterate() failed");
        }
        state = PULSEAUDIO_pa_context_get_state(h->context);
        if (!PA_CONTEXT_IS_GOOD(state)) {
            PULSEAUDIO_CloseDevice(this);
            return SDL_SetError("Could not connect to PulseAudio");
        }
    } while (state != PA_CONTEXT_READY);

    h->stream = PULSEAUDIO_pa_stream_new(
        h->context,
        "Simple DirectMedia Layer", /* stream description */
        &paspec,    /* sample format spec */
        &pacmap     /* channel map */
        );
    if (h->stream == NULL) {
        PULSEAUDIO_CloseDevice(this);
        return SDL_SetError("Could not set up PulseAudio stream");
    }

    if (PULSEAUDIO_pa_stream_connect_playback(h->stream, NULL, &paattr, flags,
                                              NULL, NULL) < 0) {
        PULSEAUDIO_CloseDevice(this);
        return SDL_SetError("Could not connect PulseAudio stream");
    }

    /* Pump until the stream reaches PA_STREAM_READY. */
    do {
        if (PULSEAUDIO_pa_mainloop_iterate(h->mainloop, 1, NULL) < 0) {
            PULSEAUDIO_CloseDevice(this);
            return SDL_SetError("pa_mainloop_iterate() failed");
        }
        state = PULSEAUDIO_pa_stream_get_state(h->stream);
        if (!PA_STREAM_IS_GOOD(state)) {
            PULSEAUDIO_CloseDevice(this);
            /* NOTE(review): "create to" reads like a typo for "connect";
             * message left untouched here. */
            return SDL_SetError("Could not create to PulseAudio stream");
        }
    } while (state != PA_STREAM_READY);

    /* We're ready to rock and roll. :-) */
    return 0;
}
/* Open a playback stream on the already-connected PulseAudio context,
 * using the negotiated sample spec and (when a latency in ms was
 * requested) a latency-derived buffer layout.  On failure after the
 * connect the stream is torn down via rdpsnd_pulse_close(); on success
 * the ADPCM dsp context is reset. */
static void rdpsnd_pulse_open(rdpsndDevicePlugin* device, rdpsndFormat* format, int latency)
{
    rdpsndPulsePlugin* pulse = (rdpsndPulsePlugin*) device;
    pa_buffer_attr buffer_attr = { 0 };
    pa_stream_flags_t flags;
    pa_stream_state_t state;
    char ss[PA_SAMPLE_SPEC_SNPRINT_MAX];

    /* Refuse to open without a live context, or to open twice. */
    if (!pulse->context || pulse->stream)
    {
        DEBUG_WARN("pulse stream has been created.");
        return;
    }

    rdpsnd_pulse_set_format_spec(pulse, format);
    pulse->latency = latency;

    if (pa_sample_spec_valid(&pulse->sample_spec) == 0)
    {
        pa_sample_spec_snprint(ss, sizeof(ss), &pulse->sample_spec);
        DEBUG_WARN("Invalid sample spec %s", ss);
        return;
    }

    pa_threaded_mainloop_lock(pulse->mainloop);

    pulse->stream = pa_stream_new(pulse->context, "freerdp", &pulse->sample_spec, NULL);
    if (!pulse->stream)
    {
        pa_threaded_mainloop_unlock(pulse->mainloop);
        DEBUG_WARN("pa_stream_new failed (%d)", pa_context_errno(pulse->context));
        return;
    }

    /* install essential callbacks */
    pa_stream_set_state_callback(pulse->stream, rdpsnd_pulse_stream_state_callback, pulse);
    pa_stream_set_write_callback(pulse->stream, rdpsnd_pulse_stream_request_callback, pulse);

    flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE;
    if (pulse->latency > 0)
    {
        /* Size the server buffer from the requested latency (ms -> usec). */
        buffer_attr.maxlength = pa_usec_to_bytes(pulse->latency * 2 * 1000, &pulse->sample_spec);
        buffer_attr.tlength = pa_usec_to_bytes(pulse->latency * 1000, &pulse->sample_spec);
        buffer_attr.prebuf = (uint32_t) -1;
        buffer_attr.minreq = (uint32_t) -1;
        buffer_attr.fragsize = (uint32_t) -1;
        flags |= PA_STREAM_ADJUST_LATENCY;
    }

    if (pa_stream_connect_playback(pulse->stream, pulse->device_name,
            pulse->latency > 0 ? &buffer_attr : NULL, flags, NULL, NULL) < 0)
    {
        pa_threaded_mainloop_unlock(pulse->mainloop);
        DEBUG_WARN("pa_stream_connect_playback failed (%d)", pa_context_errno(pulse->context));
        return;
    }

    /* Block until the state callback drives us to READY or a dead state. */
    while ((state = pa_stream_get_state(pulse->stream)) != PA_STREAM_READY)
    {
        if (!PA_STREAM_IS_GOOD(state))
        {
            DEBUG_WARN("bad stream state (%d)", pa_context_errno(pulse->context));
            break;
        }
        pa_threaded_mainloop_wait(pulse->mainloop);
    }

    pa_threaded_mainloop_unlock(pulse->mainloop);

    if (state == PA_STREAM_READY)
    {
        freerdp_dsp_context_reset_adpcm(pulse->dsp_context);
        DEBUG_SVC("connected");
    }
    else
    {
        rdpsnd_pulse_close(device);
    }
}
/* GStreamer 1.x audiosrc "prepare" vfunc: propagate the stream's channel
 * map to the ring buffer, subscribe to source-output events, connect the
 * record stream (corked, optionally muted), wait for PA_STREAM_READY,
 * then publish the server-chosen segment geometry via `spec`.
 * Returns TRUE on success, FALSE after tearing down the stream. */
static gboolean
gst_pulsesrc_prepare (GstAudioSrc * asrc, GstAudioRingBufferSpec * spec)
{
  pa_buffer_attr wanted;
  const pa_buffer_attr *actual;
  GstPulseSrc *pulsesrc = GST_PULSESRC_CAST (asrc);
  pa_stream_flags_t flags;
  pa_operation *o;
  GstAudioClock *clock;

  pa_threaded_mainloop_lock (pulsesrc->mainloop);

  {
    GstAudioRingBufferSpec s = *spec;
    const pa_channel_map *m;

    /* Push the stream's channel layout down to the ring buffer. */
    m = pa_stream_get_channel_map (pulsesrc->stream);
    gst_pulse_channel_map_to_gst (m, &s);
    gst_audio_ring_buffer_set_channel_positions (GST_AUDIO_BASE_SRC
        (pulsesrc)->ringbuffer, s.info.position);
  }

  /* enable event notifications */
  GST_LOG_OBJECT (pulsesrc, "subscribing to context events");
  if (!(o = pa_context_subscribe (pulsesrc->context,
              PA_SUBSCRIPTION_MASK_SOURCE_OUTPUT, NULL, NULL))) {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("pa_context_subscribe() failed: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }

  pa_operation_unref (o);

  /* (uint32_t)-1 = server default; only the capture fragment size is
   * pinned, to match one ring-buffer segment. */
  wanted.maxlength = -1;
  wanted.tlength = -1;
  wanted.prebuf = 0;
  wanted.minreq = -1;
  wanted.fragsize = spec->segsize;

  GST_INFO_OBJECT (pulsesrc, "maxlength: %d", wanted.maxlength);
  GST_INFO_OBJECT (pulsesrc, "tlength: %d", wanted.tlength);
  GST_INFO_OBJECT (pulsesrc, "prebuf: %d", wanted.prebuf);
  GST_INFO_OBJECT (pulsesrc, "minreq: %d", wanted.minreq);
  GST_INFO_OBJECT (pulsesrc, "fragsize: %d", wanted.fragsize);

  /* Start corked; uncorked below only when not paused. */
  flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE |
      PA_STREAM_NOT_MONOTONIC | PA_STREAM_ADJUST_LATENCY |
      PA_STREAM_START_CORKED;

  if (pulsesrc->mute_set && pulsesrc->mute)
    flags |= PA_STREAM_START_MUTED;

  if (pa_stream_connect_record (pulsesrc->stream, pulsesrc->device, &wanted,
          flags) < 0) {
    goto connect_failed;
  }

  /* our clock will now start from 0 again */
  clock = GST_AUDIO_CLOCK (GST_AUDIO_BASE_SRC (pulsesrc)->clock);
  gst_audio_clock_reset (clock, 0);

  pulsesrc->corked = TRUE;

  for (;;) {
    pa_stream_state_t state;

    state = pa_stream_get_state (pulsesrc->stream);

    if (!PA_STREAM_IS_GOOD (state))
      goto stream_is_bad;

    if (state == PA_STREAM_READY)
      break;

    /* Wait until the stream is ready */
    pa_threaded_mainloop_wait (pulsesrc->mainloop);
  }
  pulsesrc->stream_connected = TRUE;

  /* store the source output index so it can be accessed via a property */
  pulsesrc->source_output_idx = pa_stream_get_index (pulsesrc->stream);
  g_object_notify (G_OBJECT (pulsesrc), "source-output-index");

  if (pulsesrc->volume_set) {
    gst_pulsesrc_set_stream_volume (pulsesrc, pulsesrc->volume);
    pulsesrc->volume_set = FALSE;
  }

  /* get the actual buffering properties now */
  actual = pa_stream_get_buffer_attr (pulsesrc->stream);

  GST_INFO_OBJECT (pulsesrc, "maxlength: %d", actual->maxlength);
  GST_INFO_OBJECT (pulsesrc, "tlength: %d (wanted: %d)", actual->tlength,
      wanted.tlength);
  GST_INFO_OBJECT (pulsesrc, "prebuf: %d", actual->prebuf);
  GST_INFO_OBJECT (pulsesrc, "minreq: %d (wanted %d)", actual->minreq,
      wanted.minreq);
  GST_INFO_OBJECT (pulsesrc, "fragsize: %d (wanted %d)", actual->fragsize,
      wanted.fragsize);

  /* If the server granted a smaller fragment than requested, round the
   * segment size down to a whole multiple of the granted fragment.
   * (Assumes actual->fragsize != 0 — TODO confirm server guarantee.) */
  if (actual->fragsize >= wanted.fragsize) {
    spec->segsize = actual->fragsize;
  } else {
    spec->segsize = actual->fragsize * (wanted.fragsize / actual->fragsize);
  }
  spec->segtotal = actual->maxlength / spec->segsize;

  if (!pulsesrc->paused) {
    GST_DEBUG_OBJECT (pulsesrc, "uncorking because we are playing");
    gst_pulsesrc_set_corked (pulsesrc, FALSE, FALSE);
  }
  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

  return TRUE;

  /* ERRORS */
connect_failed:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("Failed to connect stream: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }
stream_is_bad:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("Failed to connect stream: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }
unlock_and_fail:
  {
    gst_pulsesrc_destroy_stream (pulsesrc);

    pa_threaded_mainloop_unlock (pulsesrc->mainloop);
    return FALSE;
  }
}
/* Reconfigure PulseAudio playback for the ALC device.  Unless the user
 * pinned "format"/"frequency" in config, query the default sink first
 * and let the server fix the sample rate (PA_STREAM_FIX_RATE); after
 * connecting, rescale the buffer attributes if the server changed the
 * rate, then install the buffer-attr and write callbacks.
 * NOTE(review): ppa_* appear to be dynamically loaded pa_* entry points. */
static ALCboolean pulse_reset_playback(ALCdevice *device) //{{{
{
    pulse_data *data = device->ExtraData;
    pa_stream_flags_t flags = 0;
    pa_stream_state_t state;
    pa_channel_map chanmap;

    ppa_threaded_mainloop_lock(data->loop);

    if(!ConfigValueExists(NULL, "format"))
    {
        pa_operation *o;
        struct {
            pa_threaded_mainloop *loop;
            char *name;
        } server_data;
        server_data.loop = data->loop;
        server_data.name = NULL;

        /* Ask the server for its default sink name, then pull that sink's
         * info into the device via sink_info_callback. */
        o = ppa_context_get_server_info(data->context, server_info_callback, &server_data);
        while(ppa_operation_get_state(o) == PA_OPERATION_RUNNING)
            ppa_threaded_mainloop_wait(data->loop);
        ppa_operation_unref(o);
        if(server_data.name)
        {
            o = ppa_context_get_sink_info_by_name(data->context, server_data.name, sink_info_callback, device);
            while(ppa_operation_get_state(o) == PA_OPERATION_RUNNING)
                ppa_threaded_mainloop_wait(data->loop);
            ppa_operation_unref(o);
            free(server_data.name);
        }
    }
    if(!ConfigValueExists(NULL, "frequency"))
        flags |= PA_STREAM_FIX_RATE;

    data->frame_size = aluBytesFromFormat(device->Format) * aluChannelsFromFormat(device->Format);
    data->stream_name = "Playback Stream";
    /* -1 = server default for everything except the total length. */
    data->attr.minreq = -1;
    data->attr.prebuf = -1;
    data->attr.fragsize = -1;
    data->attr.tlength = device->UpdateSize * device->NumUpdates * data->frame_size;
    data->attr.maxlength = data->attr.tlength;

    /* Map the ALC sample size onto a PulseAudio sample format. */
    switch(aluBytesFromFormat(device->Format))
    {
        case 1: data->spec.format = PA_SAMPLE_U8; break;
        case 2: data->spec.format = PA_SAMPLE_S16NE; break;
        case 4: data->spec.format = PA_SAMPLE_FLOAT32NE; break;
        default:
            AL_PRINT("Unknown format: 0x%x\n", device->Format);
            ppa_threaded_mainloop_unlock(data->loop);
            return ALC_FALSE;
    }
    data->spec.rate = device->Frequency;
    data->spec.channels = aluChannelsFromFormat(device->Format);

    if(ppa_sample_spec_valid(&data->spec) == 0)
    {
        AL_PRINT("Invalid sample format\n");
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    /* Windows (WAVEEX) channel ordering, matching the mixer's layout. */
    if(!ppa_channel_map_init_auto(&chanmap, data->spec.channels, PA_CHANNEL_MAP_WAVEEX))
    {
        AL_PRINT("Couldn't build map for channel count (%d)!\n", data->spec.channels);
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }
    SetDefaultWFXChannelOrder(device);

    data->stream = ppa_stream_new(data->context, data->stream_name, &data->spec, &chanmap);
    if(!data->stream)
    {
        AL_PRINT("pa_stream_new() failed: %s\n", ppa_strerror(ppa_context_errno(data->context)));
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    /* stream_state_callback signals the mainloop so the wait below wakes. */
    ppa_stream_set_state_callback(data->stream, stream_state_callback, device);

    if(ppa_stream_connect_playback(data->stream, NULL, &data->attr, flags, NULL, NULL) < 0)
    {
        AL_PRINT("Stream did not connect: %s\n", ppa_strerror(ppa_context_errno(data->context)));
        ppa_stream_unref(data->stream);
        data->stream = NULL;
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    while((state=ppa_stream_get_state(data->stream)) != PA_STREAM_READY)
    {
        if(!PA_STREAM_IS_GOOD(state))
        {
            AL_PRINT("Stream did not get ready: %s\n", ppa_strerror(ppa_context_errno(data->context)));
            ppa_stream_unref(data->stream);
            data->stream = NULL;
            ppa_threaded_mainloop_unlock(data->loop);
            return ALC_FALSE;
        }
        ppa_threaded_mainloop_wait(data->loop);
    }
    /* Swap to the post-connect state handler once we are READY. */
    ppa_stream_set_state_callback(data->stream, stream_state_callback2, device);

    data->spec = *(ppa_stream_get_sample_spec(data->stream));
    if(device->Frequency != data->spec.rate)
    {
        pa_operation *o;

        /* Server updated our playback rate, so modify the buffer attribs
         * accordingly. */
        data->attr.tlength = (ALuint64)(data->attr.tlength/data->frame_size) *
                             data->spec.rate / device->Frequency * data->frame_size;
        data->attr.maxlength = data->attr.tlength;

        o = ppa_stream_set_buffer_attr(data->stream, &data->attr, stream_success_callback, device);
        while(ppa_operation_get_state(o) == PA_OPERATION_RUNNING)
            ppa_threaded_mainloop_wait(data->loop);
        ppa_operation_unref(o);

        device->Frequency = data->spec.rate;
    }

    stream_buffer_attr_callback(data->stream, device);
#if PA_CHECK_VERSION(0,9,15)
    /* Buffer-attr change notifications only exist from 0.9.15 on, and the
     * symbol may be absent at runtime (hence the pointer check). */
    if(ppa_stream_set_buffer_attr_callback)
        ppa_stream_set_buffer_attr_callback(data->stream, stream_buffer_attr_callback, device);
#endif

    /* Prime the stream with an initial write, then hand further writes
     * over to the callback. */
    stream_write_callback(data->stream, data->attr.tlength, device);
    ppa_stream_set_write_callback(data->stream, stream_write_callback, device);

    ppa_threaded_mainloop_unlock(data->loop);
    return ALC_TRUE;
} //}}}
/* (Re)configure and connect the playback stream for an already-opened
 * PulseAudio device.  Builds buffer attributes from the device's update
 * size/count, picks a PA sample format, constructs a channel map, then
 * connects the stream and blocks (via the threaded mainloop) until the
 * stream reaches PA_STREAM_READY.  Returns ALC_TRUE on success,
 * ALC_FALSE on any failure (stream left NULL, lock released). */
static ALCboolean pulse_reset_playback(ALCdevice *device) //{{{
{
    pulse_data *data = device->ExtraData;
    pa_stream_state_t state;
    pa_channel_map chanmap;

    ppa_threaded_mainloop_lock(data->loop);

    /* Bytes per frame = sample size * channel count; minreq is one period,
     * tlength is the whole requested buffer. -1 lets the server choose. */
    data->frame_size = aluBytesFromFormat(device->Format) * aluChannelsFromFormat(device->Format);
    data->attr.minreq = data->frame_size * device->UpdateSize;
    data->attr.prebuf = -1;
    data->attr.maxlength = -1;
    data->attr.fragsize = -1;
    data->attr.tlength = data->attr.minreq * device->NumUpdates;
    data->stream_name = "Playback Stream";

    /* Map the device's sample width onto a native-endian PA sample format. */
    switch(aluBytesFromFormat(device->Format))
    {
        case 1: data->spec.format = PA_SAMPLE_U8; break;
        case 2: data->spec.format = PA_SAMPLE_S16NE; break;
        case 4: data->spec.format = PA_SAMPLE_FLOAT32NE; break;
        default:
            AL_PRINT("Unknown format: 0x%x\n", device->Format);
            ppa_threaded_mainloop_unlock(data->loop);
            return ALC_FALSE;
    }
    data->spec.rate = device->Frequency;
    data->spec.channels = aluChannelsFromFormat(device->Format);

    if(ppa_sample_spec_valid(&data->spec) == 0)
    {
        AL_PRINT("Invalid sample format\n");
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

#ifdef _WIN32
    /* On Windows, use the WAVEEX (Windows channel order) auto mapping. */
    if(!ppa_channel_map_init_auto(&chanmap, data->spec.channels, PA_CHANNEL_MAP_WAVEEX))
    {
        AL_PRINT("Couldn't build map for channel count (%d)!", data->spec.channels);
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }
#else
    /* Elsewhere, spell out an explicit channel layout per channel count. */
    switch(data->spec.channels)
    {
        case 1: ppa_channel_map_parse(&chanmap, "mono"); break;
        case 2: ppa_channel_map_parse(&chanmap, "front-left,front-right"); break;
        case 4: ppa_channel_map_parse(&chanmap, "front-left,front-right,rear-left,rear-right"); break;
        case 6: ppa_channel_map_parse(&chanmap, "front-left,front-right,rear-left,rear-right,front-center,lfe"); break;
        case 7: ppa_channel_map_parse(&chanmap, "front-left,front-right,front-center,lfe,rear-center,side-left,side-right"); break;
        case 8: ppa_channel_map_parse(&chanmap, "front-left,front-right,rear-left,rear-right,front-center,lfe,side-left,side-right"); break;
        default:
            AL_PRINT("Got unhandled channel count (%d)!", data->spec.channels);
            ppa_threaded_mainloop_unlock(data->loop);
            return ALC_FALSE;
    }
#endif

    data->stream = ppa_stream_new(data->context, data->stream_name, &data->spec, &chanmap);
    if(!data->stream)
    {
        AL_PRINT("pa_stream_new() failed: %s\n", ppa_strerror(ppa_context_errno(data->context)));
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    /* stream_state_callback signals the mainloop so the wait below wakes. */
    ppa_stream_set_state_callback(data->stream, stream_state_callback, device);
    ppa_stream_set_write_callback(data->stream, stream_write_callback, device);

    if(ppa_stream_connect_playback(data->stream, NULL, &data->attr, PA_STREAM_ADJUST_LATENCY, NULL, NULL) < 0)
    {
        AL_PRINT("Stream did not connect: %s\n", ppa_strerror(ppa_context_errno(data->context)));
        ppa_stream_unref(data->stream);
        data->stream = NULL;
        ppa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    /* Block until the stream is ready; each wait() is paired with an
     * accept() because the state callback presumably uses
     * pa_threaded_mainloop_signal(..., wait_for_accept=1) -- see callback. */
    while((state=ppa_stream_get_state(data->stream)) != PA_STREAM_READY)
    {
        if(!PA_STREAM_IS_GOOD(state))
        {
            AL_PRINT("Stream did not get ready: %s\n", ppa_strerror(ppa_context_errno(data->context)));
            ppa_stream_unref(data->stream);
            data->stream = NULL;
            ppa_threaded_mainloop_unlock(data->loop);
            return ALC_FALSE;
        }
        ppa_threaded_mainloop_wait(data->loop);
        ppa_threaded_mainloop_accept(data->loop);
    }
    /* Swap to the steady-state callback once connected. */
    ppa_stream_set_state_callback(data->stream, stream_state_callback2, device);

    /* Sync local buffer-attr bookkeeping with what the server granted. */
    stream_buffer_attr_callback(data->stream, device);
    ppa_stream_set_buffer_attr_callback(data->stream, stream_buffer_attr_callback, device);

    ppa_threaded_mainloop_unlock(data->loop);
    return ALC_TRUE;
} //}}}
static void rdpsnd_pulse_open(rdpsndDevicePlugin* device, rdpsndFormat* format) { rdpsndPulsePlugin* pulse = (rdpsndPulsePlugin*)device; pa_stream_state_t state; char ss[PA_SAMPLE_SPEC_SNPRINT_MAX]; if (!pulse->context || pulse->stream) { DEBUG_WARN("pulse stream has been created."); return; } rdpsnd_pulse_set_format_spec(pulse, format); if (pa_sample_spec_valid(&pulse->sample_spec) == 0) { pa_sample_spec_snprint(ss, sizeof(ss), &pulse->sample_spec); DEBUG_WARN("Invalid sample spec %s", ss); return; } pa_threaded_mainloop_lock(pulse->mainloop); pulse->stream = pa_stream_new(pulse->context, "freerdp", &pulse->sample_spec, NULL); if (!pulse->stream) { pa_threaded_mainloop_unlock(pulse->mainloop); DEBUG_WARN("pa_stream_new failed (%d)", pa_context_errno(pulse->context)); return; } /* install essential callbacks */ pa_stream_set_state_callback(pulse->stream, rdpsnd_pulse_stream_state_callback, pulse); pa_stream_set_write_callback(pulse->stream, rdpsnd_pulse_stream_request_callback, pulse); if (pa_stream_connect_playback(pulse->stream, pulse->device_name, NULL, PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_AUTO_TIMING_UPDATE, NULL, NULL) < 0) { pa_threaded_mainloop_unlock(pulse->mainloop); DEBUG_WARN("pa_stream_connect_playback failed (%d)", pa_context_errno(pulse->context)); return; } for (;;) { state = pa_stream_get_state(pulse->stream); if (state == PA_STREAM_READY) break; if (!PA_STREAM_IS_GOOD(state)) { DEBUG_WARN("bad stream state (%d)", pa_context_errno(pulse->context)); break; } pa_threaded_mainloop_wait(pulse->mainloop); } pa_threaded_mainloop_unlock(pulse->mainloop); if (state == PA_STREAM_READY) { memset(&pulse->adpcm, 0, sizeof(ADPCM)); DEBUG_SVC("connected"); } else { rdpsnd_pulse_close(device); } }
/* Initialize the PulseAudio audio output: create proplist and format info,
 * negotiate a format (falling back to stereo 48 kHz float), create and
 * connect the playback stream, and wait until it is READY.  Returns 0 on
 * success, -1 on failure (all partial state torn down via uninit). */
static int init(struct ao *ao)
{
    pa_proplist *proplist = NULL;
    pa_format_info *format = NULL;
    struct priv *priv = ao->priv;
    /* Explicit --audio-device/sink option wins over the generic device. */
    char *sink = priv->cfg_sink && priv->cfg_sink[0] ? priv->cfg_sink : ao->device;

    if (pa_init_boilerplate(ao) < 0)
        return -1;

    pa_threaded_mainloop_lock(priv->mainloop);

    if (!(proplist = pa_proplist_new())) {
        MP_ERR(ao, "Failed to allocate proplist\n");
        goto unlock_and_fail;
    }
    (void)pa_proplist_sets(proplist, PA_PROP_MEDIA_ICON_NAME, ao->client_name);

    if (!(format = pa_format_info_new()))
        goto unlock_and_fail;
    if (!set_format(ao, format)) {
        /* Requested format not expressible; retry with a safe default. */
        ao->channels = (struct mp_chmap) MP_CHMAP_INIT_STEREO;
        ao->samplerate = 48000;
        ao->format = AF_FORMAT_FLOAT;
        if (!set_format(ao, format)) {
            MP_ERR(ao, "Invalid audio format\n");
            goto unlock_and_fail;
        }
    }

    if (!(priv->stream = pa_stream_new_extended(priv->context, "audio stream", &format, 1, proplist)))
        goto unlock_and_fail;

    /* Stream owns copies now; free and NULL so the error path won't
     * double-free. */
    pa_format_info_free(format);
    format = NULL;

    pa_proplist_free(proplist);
    proplist = NULL;

    pa_stream_set_state_callback(priv->stream, stream_state_cb, ao);
    pa_stream_set_write_callback(priv->stream, stream_request_cb, ao);
    pa_stream_set_latency_update_callback(priv->stream, stream_latency_update_cb, ao);

    int buf_size = af_fmt_seconds_to_bytes(ao->format, priv->cfg_buffer / 1000.0, ao->channels.num, ao->samplerate);
    pa_buffer_attr bufattr = {
        .maxlength = -1,
        .tlength = buf_size > 0 ? buf_size : (uint32_t)-1,
        .prebuf = -1,
        .minreq = -1,
        .fragsize = -1,
    };

    int flags = PA_STREAM_NOT_MONOTONIC;
    if (!priv->cfg_latency_hacks)
        flags |= PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_AUTO_TIMING_UPDATE;

    if (pa_stream_connect_playback(priv->stream, sink, &bufattr, flags, NULL, NULL) < 0)
        goto unlock_and_fail;

    /* Wait until the stream is ready */
    while (1) {
        int state = pa_stream_get_state(priv->stream);
        if (state == PA_STREAM_READY)
            break;
        if (!PA_STREAM_IS_GOOD(state))
            goto unlock_and_fail;
        pa_threaded_mainloop_wait(priv->mainloop);
    }

    if (pa_stream_is_suspended(priv->stream)) {
        MP_ERR(ao, "The stream is suspended. Bailing out.\n");
        goto unlock_and_fail;
    }

    pa_threaded_mainloop_unlock(priv->mainloop);
    return 0;

unlock_and_fail:
    pa_threaded_mainloop_unlock(priv->mainloop);

    if (format)
        pa_format_info_free(format);
    if (proplist)
        pa_proplist_free(proplist);

    uninit(ao);
    return -1;
}

/* Cork (pause=true) or uncork the stream synchronously.
 * NOTE(review): no explicit unlock here -- waitop() appears to release the
 * mainloop lock after the operation completes; confirm against its
 * definition elsewhere in this file. */
static void cork(struct ao *ao, bool pause)
{
    struct priv *priv = ao->priv;
    pa_threaded_mainloop_lock(priv->mainloop);
    priv->retval = 0;
    if (!waitop(priv, pa_stream_cork(priv->stream, pause, success_cb, ao)) || !priv->retval)
        GENERIC_ERR_MSG("pa_stream_cork() failed");
}

// Play the specified data to the pulseaudio server
/* Returns the number of samples queued, or -1 on write failure. */
static int play(struct ao *ao, void **data, int samples, int flags)
{
    struct priv *priv = ao->priv;
    pa_threaded_mainloop_lock(priv->mainloop);
    if (pa_stream_write(priv->stream, data[0], samples * ao->sstride, NULL, 0, PA_SEEK_RELATIVE) < 0) {
        GENERIC_ERR_MSG("pa_stream_write() failed");
        samples = -1;
    }
    if (flags & AOPLAY_FINAL_CHUNK) {
        // Force start in case the stream was too short for prebuf
        pa_operation *op = pa_stream_trigger(priv->stream, NULL, NULL);
        pa_operation_unref(op);
    }
    pa_threaded_mainloop_unlock(priv->mainloop);
    return samples;
}

// Reset the audio stream, i.e. flush the playback buffer on the server side
static void reset(struct ao *ao)
{
    // pa_stream_flush() works badly if not corked
    cork(ao, true);
    struct priv *priv = ao->priv;
    pa_threaded_mainloop_lock(priv->mainloop);
    priv->retval = 0;
    if (!waitop(priv, pa_stream_flush(priv->stream, success_cb, ao)) || !priv->retval)
        GENERIC_ERR_MSG("pa_stream_flush() failed");
    cork(ao, false);
}
/* Open the rdpsnd playback stream.  Returns 0 on success or deferral
 * (no sample spec yet / already open), 1 on hard failure. */
static int rdpsnd_pulse_open(rdpsndDevicePlugin * devplugin)
{
    struct pulse_device_data * pulse_data;
    pa_stream_state_t state;
    pa_buffer_attr buffer_attr = { 0 };

    pulse_data = (struct pulse_device_data *) devplugin->device_data;
    if (!pulse_data->context)
        return 1;
    /* Since rdpsnd_main calls set_format() after open(), but we need the
       format spec to open the stream, we will defer the open request if
       initial set_format request is not yet received */
    if (!pulse_data->sample_spec.rate || pulse_data->stream)
        return 0;
    LLOGLN(10, ("rdpsnd_pulse_open:"));
    pa_threaded_mainloop_lock(pulse_data->mainloop);
    pulse_data->stream = pa_stream_new(pulse_data->context, "freerdp", &pulse_data->sample_spec, NULL);
    if (!pulse_data->stream)
    {
        pa_threaded_mainloop_unlock(pulse_data->mainloop);
        LLOGLN(0, ("rdpsnd_pulse_open: pa_stream_new failed (%d)", pa_context_errno(pulse_data->context)));
        return 1;
    }
    pa_stream_set_state_callback(pulse_data->stream, rdpsnd_pulse_stream_state_callback, devplugin);
    pa_stream_set_write_callback(pulse_data->stream, rdpsnd_pulse_stream_request_callback, devplugin);
    /* All attrs -1: let the server pick its defaults. */
    buffer_attr.maxlength = (uint32_t) -1;
    buffer_attr.tlength = (uint32_t) -1;//pa_usec_to_bytes(2000000, &pulse_data->sample_spec);
    buffer_attr.prebuf = (uint32_t) -1;
    buffer_attr.minreq = (uint32_t) -1;
    buffer_attr.fragsize = (uint32_t) -1;
    /* Empty device_name string means "use the server default sink". */
    if (pa_stream_connect_playback(pulse_data->stream, pulse_data->device_name[0] ? pulse_data->device_name : NULL, &buffer_attr, 0, NULL, NULL) < 0)
    {
        pa_threaded_mainloop_unlock(pulse_data->mainloop);
        LLOGLN(0, ("rdpsnd_pulse_open: pa_stream_connect_playback failed (%d)", pa_context_errno(pulse_data->context)));
        return 1;
    }
    /* Block until READY or a terminal (bad) state. */
    for (;;)
    {
        state = pa_stream_get_state(pulse_data->stream);
        if (state == PA_STREAM_READY)
            break;
        if (!PA_STREAM_IS_GOOD(state))
        {
            LLOGLN(0, ("rdpsnd_pulse_open: bad stream state (%d)", pa_context_errno(pulse_data->context)));
            break;
        }
        pa_threaded_mainloop_wait(pulse_data->mainloop);
    }
    pa_threaded_mainloop_unlock(pulse_data->mainloop);
    if (state == PA_STREAM_READY)
    {
        /* Fresh stream: reset ADPCM decoder state. */
        memset(&pulse_data->adpcm, 0, sizeof(rdpsndDspAdpcm));
        LLOGLN(0, ("rdpsnd_pulse_open: connected"));
        return 0;
    }
    else
    {
        rdpsnd_pulse_close(devplugin);
        return 1;
    }
}
/* Open a PulseAudio capture device: allocate the ring buffer, build the
 * sample spec, create and connect a record stream, and poll (with Sleep)
 * until the stream is READY.  Returns ALC_TRUE on success; on failure the
 * device is closed via pulse_close() and ALC_FALSE is returned.
 *
 * Fix: data->stream is now cleared BEFORE releasing the mainloop lock on
 * both stream-failure paths.  Previously the NULL store happened after the
 * unlock, so another thread taking the lock could briefly observe a
 * dangling stream pointer; this also matches the ordering used by every
 * other open/reset routine in this file. */
static ALCboolean pulse_open_capture(ALCdevice *device, const ALCchar *device_name) //{{{
{
    pulse_data *data;
    pa_stream_state_t state;

    if(!pa_handle)
        return ALC_FALSE;

    /* Only the single named capture device is supported. */
    if(!device_name)
        device_name = pulse_capture_device;
    else if(strcmp(device_name, pulse_capture_device) != 0)
        return ALC_FALSE;

    if(pulse_open(device, device_name) == ALC_FALSE)
        return ALC_FALSE;

    data = device->ExtraData;
    ppa_threaded_mainloop_lock(data->loop);

    data->samples = device->UpdateSize * device->NumUpdates;
    data->frame_size = aluBytesFromFormat(device->Format) * aluChannelsFromFormat(device->Format);

    if(!(data->ring = CreateRingBuffer(data->frame_size, data->samples)))
    {
        ppa_threaded_mainloop_unlock(data->loop);
        pulse_close(device);
        return ALC_FALSE;
    }

    /* Server picks most attrs; fragsize = half the total capture buffer. */
    data->attr.minreq = -1;
    data->attr.prebuf = -1;
    data->attr.maxlength = -1;
    data->attr.tlength = -1;
    data->attr.fragsize = data->frame_size * data->samples / 2;
    data->stream_name = "Capture Stream";

    data->spec.rate = device->Frequency;
    data->spec.channels = aluChannelsFromFormat(device->Format);

    /* Native-endian PA format from the device's sample width. */
    switch(aluBytesFromFormat(device->Format))
    {
        case 1: data->spec.format = PA_SAMPLE_U8; break;
        case 2: data->spec.format = PA_SAMPLE_S16NE; break;
        case 4: data->spec.format = PA_SAMPLE_FLOAT32NE; break;
        default:
            AL_PRINT("Unknown format: 0x%x\n", device->Format);
            ppa_threaded_mainloop_unlock(data->loop);
            pulse_close(device);
            return ALC_FALSE;
    }

    if(ppa_sample_spec_valid(&data->spec) == 0)
    {
        AL_PRINT("Invalid sample format\n");
        ppa_threaded_mainloop_unlock(data->loop);
        pulse_close(device);
        return ALC_FALSE;
    }

    data->stream = ppa_stream_new(data->context, data->stream_name, &data->spec, NULL);
    if(!data->stream)
    {
        AL_PRINT("pa_stream_new() failed: %s\n", ppa_strerror(ppa_context_errno(data->context)));
        ppa_threaded_mainloop_unlock(data->loop);
        pulse_close(device);
        return ALC_FALSE;
    }

    if(ppa_stream_connect_record(data->stream, NULL, &data->attr, PA_STREAM_ADJUST_LATENCY) < 0)
    {
        AL_PRINT("Stream did not connect: %s\n", ppa_strerror(ppa_context_errno(data->context)));
        ppa_stream_unref(data->stream);
        data->stream = NULL;   /* clear while still holding the lock */
        ppa_threaded_mainloop_unlock(data->loop);

        pulse_close(device);
        return ALC_FALSE;
    }

    /* No state callback is installed here, so poll with a short sleep
     * (lock dropped around the Sleep so the mainloop thread can run). */
    while((state=ppa_stream_get_state(data->stream)) != PA_STREAM_READY)
    {
        if(!PA_STREAM_IS_GOOD(state))
        {
            AL_PRINT("Stream did not get ready: %s\n", ppa_strerror(ppa_context_errno(data->context)));
            ppa_stream_unref(data->stream);
            data->stream = NULL;   /* clear while still holding the lock */
            ppa_threaded_mainloop_unlock(data->loop);

            pulse_close(device);
            return ALC_FALSE;
        }

        ppa_threaded_mainloop_unlock(data->loop);
        Sleep(1);
        ppa_threaded_mainloop_lock(data->loop);
    }
    ppa_threaded_mainloop_unlock(data->loop);

    return ALC_TRUE;
} //}}}
/* Open a PulseAudio capture device (enumerated-device variant).  Resolves
 * the AL device name to a PA source name, allocates the ring buffer,
 * creates and connects a corked record stream, and waits (state callback +
 * mainloop wait) until it is READY.  Returns ALC_NO_ERROR on success,
 * ALC_INVALID_VALUE otherwise. */
static ALCenum pulse_open_capture(ALCdevice *device, const ALCchar *device_name) //{{{
{
    char *pulse_name = NULL;
    pulse_data *data;
    pa_stream_flags_t flags = 0;
    pa_stream_state_t state;
    pa_channel_map chanmap;

    if(!allCaptureDevNameMap)
        probe_devices(AL_TRUE);

    /* NULL -> default device; otherwise look up the PA source name. */
    if(!device_name)
        device_name = pulse_device;
    else if(strcmp(device_name, pulse_device) != 0)
    {
        ALuint i;

        for(i = 0;i < numCaptureDevNames;i++)
        {
            if(strcmp(device_name, allCaptureDevNameMap[i].name) == 0)
            {
                pulse_name = allCaptureDevNameMap[i].device_name;
                break;
            }
        }
        if(i == numCaptureDevNames)
            return ALC_INVALID_VALUE;
    }

    if(pulse_open(device, device_name) == ALC_FALSE)
        return ALC_INVALID_VALUE;

    data = device->ExtraData;
    pa_threaded_mainloop_lock(data->loop);

    data->samples = device->UpdateSize * device->NumUpdates;
    data->frame_size = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
    /* Keep at least 100ms of capture buffer regardless of update sizes. */
    data->samples = maxu(data->samples, 100 * device->Frequency / 1000);

    if(!(data->ring = CreateRingBuffer(data->frame_size, data->samples)))
    {
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    /* fragsize capped at 50ms so reads stay reasonably frequent. */
    data->attr.minreq = -1;
    data->attr.prebuf = -1;
    data->attr.maxlength = data->samples * data->frame_size;
    data->attr.tlength = -1;
    data->attr.fragsize = minu(data->samples, 50*device->Frequency/1000) * data->frame_size;

    data->spec.rate = device->Frequency;
    data->spec.channels = ChannelsFromDevFmt(device->FmtChans);

    switch(device->FmtType)
    {
        case DevFmtUByte:
            data->spec.format = PA_SAMPLE_U8;
            break;
        case DevFmtShort:
            data->spec.format = PA_SAMPLE_S16NE;
            break;
        case DevFmtFloat:
            data->spec.format = PA_SAMPLE_FLOAT32NE;
            break;
        case DevFmtByte:
        case DevFmtUShort:
            /* PulseAudio has no matching sample format for these. */
            ERR("Capture format type %#x capture not supported on PulseAudio\n", device->FmtType);
            pa_threaded_mainloop_unlock(data->loop);
            goto fail;
    }

    if(pa_sample_spec_valid(&data->spec) == 0)
    {
        ERR("Invalid sample format\n");
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    if(!pa_channel_map_init_auto(&chanmap, data->spec.channels, PA_CHANNEL_MAP_WAVEEX))
    {
        ERR("Couldn't build map for channel count (%d)!\n", data->spec.channels);
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    data->stream = pa_stream_new(data->context, "Capture Stream", &data->spec, &chanmap);
    if(!data->stream)
    {
        ERR("pa_stream_new() failed: %s\n", pa_strerror(pa_context_errno(data->context)));
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    /* Callback signals the mainloop so the wait loop below wakes up. */
    pa_stream_set_state_callback(data->stream, stream_state_callback, data->loop);

    /* Start corked: capture begins only when explicitly uncorked. */
    flags |= PA_STREAM_START_CORKED|PA_STREAM_ADJUST_LATENCY;
    if(pa_stream_connect_record(data->stream, pulse_name, &data->attr, flags) < 0)
    {
        ERR("Stream did not connect: %s\n", pa_strerror(pa_context_errno(data->context)));
        pa_stream_unref(data->stream);
        data->stream = NULL;
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    while((state=pa_stream_get_state(data->stream)) != PA_STREAM_READY)
    {
        if(!PA_STREAM_IS_GOOD(state))
        {
            ERR("Stream did not get ready: %s\n", pa_strerror(pa_context_errno(data->context)));
            pa_stream_unref(data->stream);
            data->stream = NULL;
            pa_threaded_mainloop_unlock(data->loop);
            goto fail;
        }
        pa_threaded_mainloop_wait(data->loop);
    }
    /* Swap to the steady-state callback once connected. */
    pa_stream_set_state_callback(data->stream, stream_state_callback2, device);

    pa_threaded_mainloop_unlock(data->loop);
    return ALC_NO_ERROR;

fail:
    pulse_close(device);
    return ALC_INVALID_VALUE;
} //}}}
/* Open the PulseAudio backend for SDL: negotiate a sample format, allocate
 * the mix buffer, connect to the server with a blocking mainloop, and
 * create + connect the playback stream.  Returns 0 on success, -1 on error
 * (SDL error string set, partial state torn down via PULSE_CloseAudio).
 *
 * Fix: the format-negotiation loop never advanced to the next candidate
 * format.  If SDL_FirstAudioFormat() returned a format other than
 * AUDIO_U8/AUDIO_S16LSB/AUDIO_S16MSB, paspec.format stayed
 * PA_SAMPLE_INVALID and test_format never changed, so the loop spun
 * forever.  Added the missing SDL_NextAudioFormat() step (matching the
 * standard SDL audio-driver negotiation idiom). */
static int PULSE_OpenAudio(_THIS, SDL_AudioSpec *spec)
{
    int             state;
    Uint16          test_format;
    pa_sample_spec  paspec;
    pa_buffer_attr  paattr;
    pa_channel_map  pacmap;
    pa_stream_flags_t flags = 0;

    paspec.format = PA_SAMPLE_INVALID;
    /* Walk SDL's preferred-format list until one maps onto a PA format. */
    for ( test_format = SDL_FirstAudioFormat(spec->format); test_format; ) {
        switch ( test_format ) {
            case AUDIO_U8:
                paspec.format = PA_SAMPLE_U8;
                break;
            case AUDIO_S16LSB:
                paspec.format = PA_SAMPLE_S16LE;
                break;
            case AUDIO_S16MSB:
                paspec.format = PA_SAMPLE_S16BE;
                break;
        }
        if ( paspec.format != PA_SAMPLE_INVALID )
            break;
        test_format = SDL_NextAudioFormat(); /* FIX: was missing -> infinite loop */
    }
    if (paspec.format == PA_SAMPLE_INVALID ) {
        SDL_SetError("Couldn't find any suitable audio formats");
        return(-1);
    }
    spec->format = test_format;

    paspec.channels = spec->channels;
    paspec.rate = spec->freq;

    /* Calculate the final parameters for this audio specification */
#ifdef PA_STREAM_ADJUST_LATENCY
    spec->samples /= 2; /* Mix in smaller chunck to avoid underruns */
#endif
    SDL_CalculateAudioSpec(spec);

    /* Allocate mixing buffer */
    mixlen = spec->size;
    mixbuf = (Uint8 *)SDL_AllocAudioMem(mixlen);
    if ( mixbuf == NULL ) {
        return(-1);
    }
    SDL_memset(mixbuf, spec->silence, spec->size);

    /* Reduced prebuffering compared to the defaults. */
#ifdef PA_STREAM_ADJUST_LATENCY
    paattr.tlength = mixlen * 4; /* 2x original requested bufsize */
    paattr.prebuf = -1;
    paattr.maxlength = -1;
    paattr.minreq = mixlen; /* -1 can lead to pa_stream_writable_size() >= mixlen never becoming true */
    flags = PA_STREAM_ADJUST_LATENCY;
#else
    paattr.tlength = mixlen*2;
    paattr.prebuf = mixlen*2;
    paattr.maxlength = mixlen*2;
    paattr.minreq = mixlen;
#endif

    /* The SDL ALSA output hints us that we use Windows' channel mapping */
    /* http://bugzilla.libsdl.org/show_bug.cgi?id=110 */
    SDL_NAME(pa_channel_map_init_auto)(&pacmap, spec->channels, PA_CHANNEL_MAP_WAVEEX);

    /* Set up a new main loop */
    if (!(mainloop = SDL_NAME(pa_mainloop_new)())) {
        PULSE_CloseAudio(this);
        SDL_SetError("pa_mainloop_new() failed");
        return(-1);
    }
    mainloop_api = SDL_NAME(pa_mainloop_get_api)(mainloop);
    if (!(context = SDL_NAME(pa_context_new)(mainloop_api, get_progname()))) {
        PULSE_CloseAudio(this);
        SDL_SetError("pa_context_new() failed");
        return(-1);
    }

    /* Connect to the PulseAudio server */
    if (SDL_NAME(pa_context_connect)(context, NULL, 0, NULL) < 0) {
        PULSE_CloseAudio(this);
        SDL_SetError("Could not setup connection to PulseAudio");
        return(-1);
    }
    /* Pump the (non-threaded) mainloop until the context is READY. */
    do {
        if (SDL_NAME(pa_mainloop_iterate)(mainloop, 1, NULL) < 0) {
            PULSE_CloseAudio(this);
            SDL_SetError("pa_mainloop_iterate() failed");
            return(-1);
        }
        state = SDL_NAME(pa_context_get_state)(context);
        if (!PA_CONTEXT_IS_GOOD(state)) {
            PULSE_CloseAudio(this);
            SDL_SetError("Could not connect to PulseAudio");
            return(-1);
        }
    } while (state != PA_CONTEXT_READY);

    stream = SDL_NAME(pa_stream_new)(
        context,
        "Simple DirectMedia Layer", /* stream description */
        &paspec,                    /* sample format spec */
        &pacmap                     /* channel map */
    );
    if ( stream == NULL ) {
        PULSE_CloseAudio(this);
        SDL_SetError("Could not setup PulseAudio stream");
        return(-1);
    }

    if (SDL_NAME(pa_stream_connect_playback)(stream, NULL, &paattr, flags, NULL, NULL) < 0) {
        PULSE_CloseAudio(this);
        SDL_SetError("Could not connect PulseAudio stream");
        return(-1);
    }
    /* Pump the mainloop until the stream is READY. */
    do {
        if (SDL_NAME(pa_mainloop_iterate)(mainloop, 1, NULL) < 0) {
            PULSE_CloseAudio(this);
            SDL_SetError("pa_mainloop_iterate() failed");
            return(-1);
        }
        state = SDL_NAME(pa_stream_get_state)(stream);
        if (!PA_STREAM_IS_GOOD(state)) {
            PULSE_CloseAudio(this);
            SDL_SetError("Could not create to PulseAudio stream");
            return(-1);
        }
    } while (state != PA_STREAM_READY);

    return(0);
}
/* Open the PulseAudio backend for SDL (later revision): negotiate a sample
 * format via SDL's format list, allocate the mix buffer, set the client
 * name from the window caption, connect to the server with a blocking
 * mainloop, and create + connect the playback stream.  Returns 0 on
 * success, -1 on error (SDL error string set, state torn down via
 * PULSE_CloseAudio). */
static int PULSE_OpenAudio(_THIS, SDL_AudioSpec *spec)
{
    int             state;
    Uint16          test_format;
    pa_sample_spec  paspec;
    pa_buffer_attr  paattr;
    pa_channel_map  pacmap;
    pa_stream_flags_t flags = 0;

    paspec.format = PA_SAMPLE_INVALID;
    /* Walk SDL's preferred-format list until one maps onto a PA format. */
    for ( test_format = SDL_FirstAudioFormat(spec->format); test_format; ) {
        switch ( test_format ) {
            case AUDIO_U8:
                paspec.format = PA_SAMPLE_U8;
                break;
            case AUDIO_S16LSB:
                paspec.format = PA_SAMPLE_S16LE;
                break;
            case AUDIO_S16MSB:
                paspec.format = PA_SAMPLE_S16BE;
                break;
        }
        if ( paspec.format != PA_SAMPLE_INVALID )
            break;
        test_format = SDL_NextAudioFormat();
    }
    if (paspec.format == PA_SAMPLE_INVALID ) {
        SDL_SetError("Couldn't find any suitable audio formats");
        return(-1);
    }
    spec->format = test_format;

    paspec.channels = spec->channels;
    paspec.rate = spec->freq;

    /* Mix in smaller chunks to avoid underruns when the server manages
     * latency for us. */
#ifdef PA_STREAM_ADJUST_LATENCY
    spec->samples /= 2;
#endif
    SDL_CalculateAudioSpec(spec);

    /* Allocate the silence-filled mixing buffer. */
    mixlen = spec->size;
    mixbuf = (Uint8 *)SDL_AllocAudioMem(mixlen);
    if ( mixbuf == NULL ) {
        return(-1);
    }
    SDL_memset(mixbuf, spec->silence, spec->size);

    /* Reduced prebuffering compared to the PA defaults. */
#ifdef PA_STREAM_ADJUST_LATENCY
    paattr.tlength = mixlen * 4;
    paattr.prebuf = -1;
    paattr.maxlength = -1;
    paattr.minreq = mixlen;
    flags = PA_STREAM_ADJUST_LATENCY;
#else
    paattr.tlength = mixlen*2;
    paattr.prebuf = mixlen*2;
    paattr.maxlength = mixlen*2;
    paattr.minreq = mixlen;
#endif

    /* Use Windows' channel mapping, matching the SDL ALSA backend. */
    SDL_NAME(pa_channel_map_init_auto)(&pacmap, spec->channels, PA_CHANNEL_MAP_WAVEEX);

    /* Set up a new main loop. */
    if (!(mainloop = SDL_NAME(pa_mainloop_new)())) {
        PULSE_CloseAudio(this);
        SDL_SetError("pa_mainloop_new() failed");
        return(-1);
    }

    /* Use the window caption as the PA client name, fetching it lazily. */
    if (this->hidden->caption == NULL) {
        char *title = NULL;
        SDL_WM_GetCaption(&title, NULL);
        PULSE_SetCaption(this, title);
    }
    mainloop_api = SDL_NAME(pa_mainloop_get_api)(mainloop);
    if (!(context = SDL_NAME(pa_context_new)(mainloop_api, this->hidden->caption))) {
        PULSE_CloseAudio(this);
        SDL_SetError("pa_context_new() failed");
        return(-1);
    }

    /* Connect to the PulseAudio server. */
    if (SDL_NAME(pa_context_connect)(context, NULL, 0, NULL) < 0) {
        PULSE_CloseAudio(this);
        SDL_SetError("Could not setup connection to PulseAudio");
        return(-1);
    }
    /* Pump the (non-threaded) mainloop until the context is READY. */
    do {
        if (SDL_NAME(pa_mainloop_iterate)(mainloop, 1, NULL) < 0) {
            PULSE_CloseAudio(this);
            SDL_SetError("pa_mainloop_iterate() failed");
            return(-1);
        }
        state = SDL_NAME(pa_context_get_state)(context);
        if (!PA_CONTEXT_IS_GOOD(state)) {
            PULSE_CloseAudio(this);
            SDL_SetError("Could not connect to PulseAudio");
            return(-1);
        }
    } while (state != PA_CONTEXT_READY);

    stream = SDL_NAME(pa_stream_new)(
        context,
        "Simple DirectMedia Layer", /* stream description */
        &paspec,                    /* sample format spec */
        &pacmap                     /* channel map */
    );
    if ( stream == NULL ) {
        PULSE_CloseAudio(this);
        SDL_SetError("Could not setup PulseAudio stream");
        return(-1);
    }

    if (SDL_NAME(pa_stream_connect_playback)(stream, NULL, &paattr, flags, NULL, NULL) < 0) {
        PULSE_CloseAudio(this);
        SDL_SetError("Could not connect PulseAudio stream");
        return(-1);
    }
    /* Pump the mainloop until the stream is READY. */
    do {
        if (SDL_NAME(pa_mainloop_iterate)(mainloop, 1, NULL) < 0) {
            PULSE_CloseAudio(this);
            SDL_SetError("pa_mainloop_iterate() failed");
            return(-1);
        }
        state = SDL_NAME(pa_stream_get_state)(stream);
        if (!PA_STREAM_IS_GOOD(state)) {
            PULSE_CloseAudio(this);
            SDL_SetError("Could not create to PulseAudio stream");
            return(-1);
        }
    } while (state != PA_STREAM_READY);

    return(0);
}
/* Open a PulseAudio capture device (dynamically-loaded-libpulse variant).
 * Loads libpulse on demand, resolves the AL device name, allocates the
 * ring buffer, then creates and connects a corked record stream, waiting
 * via the threaded mainloop until READY.  Returns ALC_TRUE on success,
 * ALC_FALSE on failure (device closed via pulse_close). */
static ALCboolean pulse_open_capture( ALCdevice* device, const ALCchar* device_name ) //{{{
{
    char* pulse_name = NULL;
    pulse_data* data;
    pa_stream_flags_t flags = 0;
    pa_stream_state_t state;
    pa_channel_map chanmap;

    if ( !pulse_load() )
    {
        return ALC_FALSE;
    }

    if ( !allCaptureDevNameMap )
    {
        probe_devices( AL_TRUE );
    }

    /* NULL -> first probed device; otherwise resolve to a PA source name. */
    if ( !device_name )
    {
        device_name = allCaptureDevNameMap[0].name;
    }
    else
    {
        ALuint i;

        for ( i = 0; i < numCaptureDevNames; i++ )
        {
            if ( strcmp( device_name, allCaptureDevNameMap[i].name ) == 0 )
            {
                pulse_name = allCaptureDevNameMap[i].device_name;
                break;
            }
        }

        if ( i == numCaptureDevNames )
        {
            return ALC_FALSE;
        }
    }

    if ( pulse_open( device, device_name ) == ALC_FALSE )
    {
        return ALC_FALSE;
    }

    data = device->ExtraData;
    ppa_threaded_mainloop_lock( data->loop );

    data->samples = device->UpdateSize * device->NumUpdates;
    data->frame_size = aluFrameSizeFromFormat( device->Format );

    if ( !( data->ring = CreateRingBuffer( data->frame_size, data->samples ) ) )
    {
        ppa_threaded_mainloop_unlock( data->loop );
        goto fail;
    }

    data->attr.minreq = -1;
    data->attr.prebuf = -1;
    data->attr.maxlength = data->frame_size * data->samples;
    data->attr.tlength = -1;
    /* NOTE(review): this min() compares a byte count (frame_size*samples)
     * against a 10ms sample count (Frequency/100) -- units don't match;
     * the sibling implementation multiplies the sample count by frame_size.
     * Verify intent before relying on the resulting fragsize. */
    data->attr.fragsize = min( data->frame_size * data->samples, 10 * device->Frequency / 1000 );

    data->spec.rate = device->Frequency;
    data->spec.channels = aluChannelsFromFormat( device->Format );

    /* Native-endian PA sample format from the device's sample width. */
    switch ( aluBytesFromFormat( device->Format ) )
    {
        case 1:
            data->spec.format = PA_SAMPLE_U8;
            break;
        case 2:
            data->spec.format = PA_SAMPLE_S16NE;
            break;
        case 4:
            data->spec.format = PA_SAMPLE_FLOAT32NE;
            break;
        default:
            AL_PRINT( "Unknown format: 0x%x\n", device->Format );
            ppa_threaded_mainloop_unlock( data->loop );
            goto fail;
    }

    if ( ppa_sample_spec_valid( &data->spec ) == 0 )
    {
        AL_PRINT( "Invalid sample format\n" );
        ppa_threaded_mainloop_unlock( data->loop );
        goto fail;
    }

    if ( !ppa_channel_map_init_auto( &chanmap, data->spec.channels, PA_CHANNEL_MAP_WAVEEX ) )
    {
        AL_PRINT( "Couldn't build map for channel count (%d)!\n", data->spec.channels );
        ppa_threaded_mainloop_unlock( data->loop );
        goto fail;
    }

    data->stream = ppa_stream_new( data->context, "Capture Stream", &data->spec, &chanmap );

    if ( !data->stream )
    {
        AL_PRINT( "pa_stream_new() failed: %s\n", ppa_strerror( ppa_context_errno( data->context ) ) );
        ppa_threaded_mainloop_unlock( data->loop );
        goto fail;
    }

    /* Callback signals the mainloop so the wait loop below wakes up. */
    ppa_stream_set_state_callback( data->stream, stream_state_callback, data->loop );

    /* Start corked: capture begins only when explicitly uncorked. */
    flags |= PA_STREAM_START_CORKED | PA_STREAM_ADJUST_LATENCY;

    if ( ppa_stream_connect_record( data->stream, pulse_name, &data->attr, flags ) < 0 )
    {
        AL_PRINT( "Stream did not connect: %s\n", ppa_strerror( ppa_context_errno( data->context ) ) );
        ppa_stream_unref( data->stream );
        data->stream = NULL;
        ppa_threaded_mainloop_unlock( data->loop );
        goto fail;
    }

    while ( ( state = ppa_stream_get_state( data->stream ) ) != PA_STREAM_READY )
    {
        if ( !PA_STREAM_IS_GOOD( state ) )
        {
            AL_PRINT( "Stream did not get ready: %s\n", ppa_strerror( ppa_context_errno( data->context ) ) );
            ppa_stream_unref( data->stream );
            data->stream = NULL;
            ppa_threaded_mainloop_unlock( data->loop );
            goto fail;
        }

        ppa_threaded_mainloop_wait( data->loop );
    }

    /* Swap to the steady-state callback once connected. */
    ppa_stream_set_state_callback( data->stream, stream_state_callback2, device );

    ppa_threaded_mainloop_unlock( data->loop );
    return ALC_TRUE;

fail:
    pulse_close( device );
    return ALC_FALSE;
} //}}}
static int tsmf_pulse_open_stream(TSMFPulseAudioDevice * pulse) { pa_stream_state_t state; pa_buffer_attr buffer_attr = { 0 }; if (!pulse->context) return 1; LLOGLN(0, ("tsmf_pulse_open_stream:")); pa_threaded_mainloop_lock(pulse->mainloop); pulse->stream = pa_stream_new(pulse->context, "freerdp", &pulse->sample_spec, NULL); if (!pulse->stream) { pa_threaded_mainloop_unlock(pulse->mainloop); LLOGLN(0, ("tsmf_pulse_open_stream: pa_stream_new failed (%d)", pa_context_errno(pulse->context))); return 1; } pa_stream_set_state_callback(pulse->stream, tsmf_pulse_stream_state_callback, pulse); pa_stream_set_write_callback(pulse->stream, tsmf_pulse_stream_request_callback, pulse); buffer_attr.maxlength = pa_usec_to_bytes(500000, &pulse->sample_spec); buffer_attr.tlength = pa_usec_to_bytes(250000, &pulse->sample_spec); buffer_attr.prebuf = (uint32_t) -1; buffer_attr.minreq = (uint32_t) -1; buffer_attr.fragsize = (uint32_t) -1; if (pa_stream_connect_playback(pulse->stream, pulse->device[0] ? pulse->device : NULL, &buffer_attr, PA_STREAM_ADJUST_LATENCY | PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE, NULL, NULL) < 0) { pa_threaded_mainloop_unlock(pulse->mainloop); LLOGLN(0, ("tsmf_pulse_open_stream: pa_stream_connect_playback failed (%d)", pa_context_errno(pulse->context))); return 1; } for (;;) { state = pa_stream_get_state(pulse->stream); if (state == PA_STREAM_READY) break; if (!PA_STREAM_IS_GOOD(state)) { LLOGLN(0, ("tsmf_pulse_open_stream: bad stream state (%d)", pa_context_errno(pulse->context))); break; } pa_threaded_mainloop_wait(pulse->mainloop); } pa_threaded_mainloop_unlock(pulse->mainloop); if (state == PA_STREAM_READY) { LLOGLN(0, ("tsmf_pulse_open_stream: connected")); return 0; } else { tsmf_pulse_close_stream(pulse); return 1; } }
pa_simple* pa_simple_new( const char *server, const char *name, pa_stream_direction_t dir, const char *dev, const char *stream_name, const pa_sample_spec *ss, const pa_channel_map *map, const pa_buffer_attr *attr, int *rerror) { pa_simple *p; int error = PA_ERR_INTERNAL, r; CHECK_VALIDITY_RETURN_ANY(rerror, !server || *server, PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, dir == PA_STREAM_PLAYBACK || dir == PA_STREAM_RECORD, PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, !dev || *dev, PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, ss && pa_sample_spec_valid(ss), PA_ERR_INVALID, NULL); CHECK_VALIDITY_RETURN_ANY(rerror, !map || (pa_channel_map_valid(map) && map->channels == ss->channels), PA_ERR_INVALID, NULL) p = pa_xnew0(pa_simple, 1); p->direction = dir; if (!(p->mainloop = pa_threaded_mainloop_new())) goto fail; if (!(p->context = pa_context_new(pa_threaded_mainloop_get_api(p->mainloop), name))) goto fail; pa_context_set_state_callback(p->context, context_state_cb, p); if (pa_context_connect(p->context, server, 0, NULL) < 0) { error = pa_context_errno(p->context); goto fail; } pa_threaded_mainloop_lock(p->mainloop); if (pa_threaded_mainloop_start(p->mainloop) < 0) goto unlock_and_fail; for (;;) { pa_context_state_t state; state = pa_context_get_state(p->context); if (state == PA_CONTEXT_READY) break; if (!PA_CONTEXT_IS_GOOD(state)) { error = pa_context_errno(p->context); goto unlock_and_fail; } /* Wait until the context is ready */ pa_threaded_mainloop_wait(p->mainloop); } if (!(p->stream = pa_stream_new(p->context, stream_name, ss, map))) { error = pa_context_errno(p->context); goto unlock_and_fail; } pa_stream_set_state_callback(p->stream, stream_state_cb, p); pa_stream_set_read_callback(p->stream, stream_request_cb, p); pa_stream_set_write_callback(p->stream, stream_request_cb, p); pa_stream_set_latency_update_callback(p->stream, stream_latency_update_cb, p); if (dir == PA_STREAM_PLAYBACK) r = 
pa_stream_connect_playback(p->stream, dev, attr, PA_STREAM_INTERPOLATE_TIMING |PA_STREAM_ADJUST_LATENCY |PA_STREAM_AUTO_TIMING_UPDATE, NULL, NULL); else r = pa_stream_connect_record(p->stream, dev, attr, PA_STREAM_INTERPOLATE_TIMING |PA_STREAM_ADJUST_LATENCY |PA_STREAM_AUTO_TIMING_UPDATE); if (r < 0) { error = pa_context_errno(p->context); goto unlock_and_fail; } for (;;) { pa_stream_state_t state; state = pa_stream_get_state(p->stream); if (state == PA_STREAM_READY) break; if (!PA_STREAM_IS_GOOD(state)) { error = pa_context_errno(p->context); goto unlock_and_fail; } /* Wait until the stream is ready */ pa_threaded_mainloop_wait(p->mainloop); } pa_threaded_mainloop_unlock(p->mainloop); return p; unlock_and_fail: pa_threaded_mainloop_unlock(p->mainloop); fail: if (rerror) *rerror = error; pa_simple_free(p); return NULL; }