Example #1
void
AudioStream::stream_state_callback(pa_stream* s, void* /*user_data*/)
{
    char str[PA_SAMPLE_SPEC_SNPRINT_MAX];

    switch (pa_stream_get_state(s)) {
        case PA_STREAM_CREATING:
            DEBUG("Stream is creating...");
            break;

        case PA_STREAM_TERMINATED:
            DEBUG("Stream is terminating...");
            break;

        case PA_STREAM_READY:
            DEBUG("Stream successfully created, connected to %s", pa_stream_get_device_name(s));
            DEBUG("maxlength %u", pa_stream_get_buffer_attr(s)->maxlength);
            DEBUG("tlength %u", pa_stream_get_buffer_attr(s)->tlength);
            DEBUG("prebuf %u", pa_stream_get_buffer_attr(s)->prebuf);
            DEBUG("minreq %u", pa_stream_get_buffer_attr(s)->minreq);
            DEBUG("fragsize %u", pa_stream_get_buffer_attr(s)->fragsize);
            DEBUG("samplespec %s", pa_sample_spec_snprint(str, sizeof(str), pa_stream_get_sample_spec(s)));
            break;

        case PA_STREAM_UNCONNECTED:
            DEBUG("Stream unconnected");
            break;

        case PA_STREAM_FAILED:
        default:
            ERROR("Sink/Source doesn't exist: %s", pa_strerror(pa_context_errno(pa_stream_get_context(s))));
            break;
    }
}
Example #2
/**
 * PulseAudio stream state callback
 */
static void
stream_state_callback (pa_stream * s, void *userdata)
{
  GNUNET_assert (NULL != s);

  switch (pa_stream_get_state (s))
  {
  case PA_STREAM_CREATING:
  case PA_STREAM_TERMINATED:
    break;
  case PA_STREAM_READY:
    {
      const pa_buffer_attr *a;
      char cmt[PA_CHANNEL_MAP_SNPRINT_MAX];
      char sst[PA_SAMPLE_SPEC_SNPRINT_MAX];

      GNUNET_log (GNUNET_ERROR_TYPE_INFO,
		  _("Stream successfully created.\n"));

      if (!(a = pa_stream_get_buffer_attr (s)))
      {
	GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
		    _("pa_stream_get_buffer_attr() failed: %s\n"),
		    pa_strerror (pa_context_errno
				 (pa_stream_get_context (s))));

      }
      else
      {
	GNUNET_log (GNUNET_ERROR_TYPE_INFO,
		    _("Buffer metrics: maxlength=%u, fragsize=%u\n"),
		    a->maxlength, a->fragsize);
      }
      GNUNET_log (GNUNET_ERROR_TYPE_INFO,
		  _("Using sample spec '%s', channel map '%s'.\n"),
		  pa_sample_spec_snprint (sst, sizeof (sst),
					  pa_stream_get_sample_spec (s)),
		  pa_channel_map_snprint (cmt, sizeof (cmt),
					  pa_stream_get_channel_map (s)));

      GNUNET_log (GNUNET_ERROR_TYPE_INFO,
		  _("Connected to device %s (%u, %ssuspended).\n"),
		  pa_stream_get_device_name (s),
		  pa_stream_get_device_index (s),
		  pa_stream_is_suspended (s) ? "" : "not ");
    }
    break;
  case PA_STREAM_FAILED:
  default:
    GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
		_("Stream error: %s\n"),
		pa_strerror (pa_context_errno (pa_stream_get_context (s))));
    quit (1);
  }
}
Example #3
// Return the current latency in seconds
static float get_delay(struct ao *ao)
{
    /* This code basically does what pa_stream_get_latency() _should_
     * do, but doesn't due to multiple known bugs in PulseAudio (at
     * PulseAudio version 2.1). In particular, the timing interpolation
     * mode (PA_STREAM_INTERPOLATE_TIMING) can return completely bogus
     * values, and the non-interpolating code has a bug causing too
     * large results at end of stream (so a stream never seems to finish).
     * This code can still return wrong values in some cases due to known
     * PulseAudio bugs that can not be worked around on the client side.
     *
     * We always query the server for latest timing info. This may take
     * too long to work well with remote audio servers, but at least
     * this should be enough to fix the normal local playback case.
     */
    struct priv *priv = ao->priv;
    pa_threaded_mainloop_lock(priv->mainloop);
    if (!waitop(priv, pa_stream_update_timing_info(priv->stream, NULL, NULL))) {
        GENERIC_ERR_MSG(priv->context, "pa_stream_update_timing_info() failed");
        return 0;
    }
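    /* Note: waitop() presumably releases the mainloop lock when the operation
     * finishes (or fails), which is why the lock is re-acquired here before
     * the timing info is read. */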
    pa_threaded_mainloop_lock(priv->mainloop);
    const pa_timing_info *ti = pa_stream_get_timing_info(priv->stream);
    if (!ti) {
        pa_threaded_mainloop_unlock(priv->mainloop);
        GENERIC_ERR_MSG(priv->context, "pa_stream_get_timing_info() failed");
        return 0;
    }
    const struct pa_sample_spec *ss = pa_stream_get_sample_spec(priv->stream);
    if (!ss) {
        pa_threaded_mainloop_unlock(priv->mainloop);
        GENERIC_ERR_MSG(priv->context, "pa_stream_get_sample_spec() failed");
        return 0;
    }
    // data left in PulseAudio's main buffers (not written to sink yet)
    int64_t latency = pa_bytes_to_usec(ti->write_index - ti->read_index, ss);
    // since this info may be from a while ago, playback has progressed since
    latency -= ti->transport_usec;
    // data already moved from buffers to sink, but not played yet
    int64_t sink_latency = ti->sink_usec;
    if (!ti->playing)
        /* At the end of a stream, part of the data "left" in the sink may
         * be padding silence after the end; that should be subtracted to
         * get the amount of real audio from our stream. This adjustment
         * is missing from PulseAudio's own get_latency calculations
         * (as of PulseAudio 2.1). */
        sink_latency -= pa_bytes_to_usec(ti->since_underrun, ss);
    if (sink_latency > 0)
        latency += sink_latency;
    if (latency < 0)
        latency = 0;
    pa_threaded_mainloop_unlock(priv->mainloop);
    return latency / 1e6;
}
Example #4
/* This routine is called whenever the stream state changes */
static void stream_state_callback(pa_stream *s, void *userdata) {
    pa_assert(s);

    switch (pa_stream_get_state(s)) {
        case PA_STREAM_CREATING:
        case PA_STREAM_TERMINATED:
            break;

        case PA_STREAM_READY:

            if (verbose) {
                const pa_buffer_attr *a;
                char cmt[PA_CHANNEL_MAP_SNPRINT_MAX], sst[PA_SAMPLE_SPEC_SNPRINT_MAX];

                pa_log(_("Stream successfully created."));

                if (!(a = pa_stream_get_buffer_attr(s)))
                    pa_log(_("pa_stream_get_buffer_attr() failed: %s"), pa_strerror(pa_context_errno(pa_stream_get_context(s))));
                else {

                    if (mode == PLAYBACK)
                        pa_log(_("Buffer metrics: maxlength=%u, tlength=%u, prebuf=%u, minreq=%u"), a->maxlength, a->tlength, a->prebuf, a->minreq);
                    else {
                        pa_assert(mode == RECORD);
                        pa_log(_("Buffer metrics: maxlength=%u, fragsize=%u"), a->maxlength, a->fragsize);
                    }
                }

                pa_log(_("Using sample spec '%s', channel map '%s'."),
                        pa_sample_spec_snprint(sst, sizeof(sst), pa_stream_get_sample_spec(s)),
                        pa_channel_map_snprint(cmt, sizeof(cmt), pa_stream_get_channel_map(s)));

                pa_log(_("Connected to device %s (%u, %ssuspended)."),
                        pa_stream_get_device_name(s),
                        pa_stream_get_device_index(s),
                        pa_stream_is_suspended(s) ? "" : "not ");
            }

            break;

        case PA_STREAM_FAILED:
        default:
            pa_log(_("Stream error: %s"), pa_strerror(pa_context_errno(pa_stream_get_context(s))));
            quit(1);
    }
}
Example #5
void PulseAudioSystem::write_callback(pa_stream *s, size_t bytes, void *userdata) {
	PulseAudioSystem *pas = reinterpret_cast<PulseAudioSystem *>(userdata);
	Q_ASSERT(s == pas->pasOutput);

	AudioOutputPtr ao = g.ao;
	PulseAudioOutput *pao = dynamic_cast<PulseAudioOutput *>(ao.get());

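	// Scratch buffer (VLA) sized to the number of bytes PulseAudio requested for this write.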
	unsigned char buffer[bytes];

	if (! pao) {
		// Transitioning, but most likely transitions back, so just zero.
		memset(buffer, 0, bytes);
		pa_stream_write(s, buffer, bytes, NULL, 0, PA_SEEK_RELATIVE);
		pas->wakeup();
		return;
	}

	const pa_sample_spec *pss = pa_stream_get_sample_spec(s);
	const pa_channel_map *pcm = pa_stream_get_channel_map(pas->pasOutput);
	if (!pa_sample_spec_equal(pss, &pao->pss) || !pa_channel_map_equal(pcm, &pao->pcm)) {
		pao->pss = *pss;
		pao->pcm = *pcm;
		if (pss->format == PA_SAMPLE_FLOAT32NE)
			pao->eSampleFormat = PulseAudioOutput::SampleFloat;
		else
			pao->eSampleFormat = PulseAudioOutput::SampleShort;
		pao->iMixerFreq = pss->rate;
		pao->iChannels = pss->channels;
		unsigned int chanmasks[pss->channels];
		for (int i=0;i<pss->channels;++i) {
			unsigned int cm = 0;
			switch (pcm->map[i]) {
				case PA_CHANNEL_POSITION_LEFT:
					cm = SPEAKER_FRONT_LEFT;
					break;
				case PA_CHANNEL_POSITION_RIGHT:
					cm = SPEAKER_FRONT_RIGHT;
					break;
				case PA_CHANNEL_POSITION_CENTER:
					cm = SPEAKER_FRONT_CENTER;
					break;
				case PA_CHANNEL_POSITION_REAR_LEFT:
					cm = SPEAKER_BACK_LEFT;
					break;
				case PA_CHANNEL_POSITION_REAR_RIGHT:
					cm = SPEAKER_BACK_RIGHT;
					break;
				case PA_CHANNEL_POSITION_REAR_CENTER:
					cm = SPEAKER_BACK_CENTER;
					break;
				case PA_CHANNEL_POSITION_LFE:
					cm = SPEAKER_LOW_FREQUENCY;
					break;
				case PA_CHANNEL_POSITION_SIDE_LEFT:
					cm = SPEAKER_SIDE_LEFT;
					break;
				case PA_CHANNEL_POSITION_SIDE_RIGHT:
					cm = SPEAKER_SIDE_RIGHT;
					break;
				case PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER:
					cm = SPEAKER_FRONT_LEFT_OF_CENTER;
					break;
				case PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER:
					cm = SPEAKER_FRONT_RIGHT_OF_CENTER;
					break;
				default:
					cm = 0;
					break;
			}
			chanmasks[i] = cm;
		}
		pao->initializeMixer(chanmasks);
	}

	const unsigned int iSampleSize = pao->iSampleSize;
	const unsigned int samples = static_cast<unsigned int>(bytes) / iSampleSize;
	bool oldAttenuation = pas->bAttenuating;

	// do we have some mixed output?
	if (pao->mix(buffer, samples)) {
		// attenuate if instructed to or it's in settings
		pas->bAttenuating = (g.bAttenuateOthers || g.s.bAttenuateOthers);

	} else {
		memset(buffer, 0, bytes);

		// attenuate if instructed to (self-activated)
		pas->bAttenuating = g.bAttenuateOthers;
	}

	// if the attenuation state has changed
	if (oldAttenuation != pas->bAttenuating) {
		pas->setVolumes();
	}

	pa_stream_write(s, buffer, iSampleSize * samples, NULL, 0, PA_SEEK_RELATIVE);
}
Example #6
void PulseAudioSystem::read_callback(pa_stream *s, size_t bytes, void *userdata) {
	PulseAudioSystem *pas = reinterpret_cast<PulseAudioSystem *>(userdata);

	size_t length = bytes;
	const void *data = NULL;
	pa_stream_peek(s, &data, &length);
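	// Per pa_stream_peek()'s documented contract: NULL data with non-zero length indicates a
	// hole at the read index, while NULL data with zero length means the record buffer is empty.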
	if (data == NULL && length > 0) {
		qWarning("PulseAudio: pa_stream_peek reports no data at current read index.");
	} else if (data == NULL && length == 0) {
		qWarning("PulseAudio: pa_stream_peek reports empty memblockq.");
	} else if (data == NULL || length == 0) {
		qWarning("PulseAudio: invalid pa_stream_peek state encountered.");
		return;
	}

	AudioInputPtr ai = g.ai;
	PulseAudioInput *pai = dynamic_cast<PulseAudioInput *>(ai.get());
	if (! pai) {
		if (length > 0) {
			pa_stream_drop(s);
		}
		pas->wakeup();
		return;
	}

	const pa_sample_spec *pss = pa_stream_get_sample_spec(s);

	if (s == pas->pasInput) {
		if (!pa_sample_spec_equal(pss, &pai->pssMic)) {
			pai->pssMic = *pss;
			pai->iMicFreq = pss->rate;
			pai->iMicChannels = pss->channels;
			if (pss->format == PA_SAMPLE_FLOAT32NE)
				pai->eMicFormat = PulseAudioInput::SampleFloat;
			else
				pai->eMicFormat = PulseAudioInput::SampleShort;
			pai->initializeMixer();
		}
		if (data != NULL) {
			pai->addMic(data, static_cast<unsigned int>(length) / pai->iMicSampleSize);
		}
	} else if (s == pas->pasSpeaker) {
		if (!pa_sample_spec_equal(pss, &pai->pssEcho)) {
			pai->pssEcho = *pss;
			pai->iEchoFreq = pss->rate;
			pai->iEchoChannels = pss->channels;
			if (pss->format == PA_SAMPLE_FLOAT32NE)
				pai->eEchoFormat = PulseAudioInput::SampleFloat;
			else
				pai->eEchoFormat = PulseAudioInput::SampleShort;
			pai->initializeMixer();
		}
		if (data != NULL) {
			pai->addEcho(data, static_cast<unsigned int>(length) / pai->iEchoSampleSize);
		}
	}

	if (length > 0) {
		pa_stream_drop(s);
	}
}
Example #7
void PulseAudioSystem::eventCallback(pa_mainloop_api *api, pa_defer_event *) {
	api->defer_enable(pade, false);

	if (! bSourceDone || ! bSinkDone || ! bServerDone)
		return;

	AudioInputPtr ai = g.ai;
	AudioOutputPtr ao = g.ao;
	AudioInput *raw_ai = ai.get();
	AudioOutput *raw_ao = ao.get();
	PulseAudioInput *pai = dynamic_cast<PulseAudioInput *>(raw_ai);
	PulseAudioOutput *pao = dynamic_cast<PulseAudioOutput *>(raw_ao);

	if (raw_ao) {
		QString odev = outputDevice();
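		// A missing stream is treated as TERMINATED so the switch below (re)creates it.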
		pa_stream_state ost = pasOutput ? pa_stream_get_state(pasOutput) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if (! pao && (ost == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pao) {
			switch (ost) {
				case PA_STREAM_TERMINATED: {
						if (pasOutput)
							pa_stream_unref(pasOutput);

						pa_sample_spec pss = qhSpecMap.value(odev);
						pa_channel_map pcm = qhChanMap.value(odev);
						if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
							pss.format = PA_SAMPLE_FLOAT32NE;
						if (pss.rate == 0)
							pss.rate = SAMPLE_RATE;
						if ((pss.channels == 0) || (! g.s.doPositionalAudio()))
							pss.channels = 1;

						pasOutput = pa_stream_new(pacContext, mumble_sink_input, &pss, (pss.channels == 1) ? NULL : &pcm);
						pa_stream_set_state_callback(pasOutput, stream_callback, this);
						pa_stream_set_write_callback(pasOutput, write_callback, this);
					}
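				// no break: fall through so the stream created above gets connected below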
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
						if (g.s.iOutputDelay != iDelayCache) {
							do_stop = true;
						} else if (g.s.doPositionalAudio() != bPositionalCache) {
							do_stop = true;
						} else if (odev != qsOutputCache) {
							do_stop = true;
						}
						break;
					}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping output");
			pa_stream_disconnect(pasOutput);
			iSinkId = -1;
		} else if (do_start) {
			qWarning("PulseAudio: Starting output: %s", qPrintable(odev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasOutput);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
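			// Bytes per block: iFrameSize samples (presumably at Mumble's internal SAMPLE_RATE)
			// rescaled to the device rate, times channel count and bytes per sample.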
			const unsigned int iBlockLen = ((pao->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen * (g.s.iOutputDelay+1);
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			iDelayCache = g.s.iOutputDelay;
			bPositionalCache = g.s.doPositionalAudio();
			qsOutputCache = odev;

			pa_stream_connect_playback(pasOutput, qPrintable(odev), &buff, PA_STREAM_ADJUST_LATENCY, NULL, NULL);
			pa_context_get_sink_info_by_name(pacContext, qPrintable(odev), sink_info_callback, this);
		}
	}

	if (raw_ai) {
		QString idev = inputDevice();
		pa_stream_state ist = pasInput ? pa_stream_get_state(pasInput) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if (! pai && (ist == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pai) {
			switch (ist) {
				case PA_STREAM_TERMINATED: {
						if (pasInput)
							pa_stream_unref(pasInput);

						pa_sample_spec pss = qhSpecMap.value(idev);
						if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
							pss.format = PA_SAMPLE_FLOAT32NE;
						if (pss.rate == 0)
							pss.rate = SAMPLE_RATE;
						pss.channels = 1;

						pasInput = pa_stream_new(pacContext, "Microphone", &pss, NULL);
						pa_stream_set_state_callback(pasInput, stream_callback, this);
						pa_stream_set_read_callback(pasInput, read_callback, this);
					}
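				// no break: fall through to start the newly created input stream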
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
						if (idev != qsInputCache) {
							do_stop = true;
						}
						break;
					}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping input");
			pa_stream_disconnect(pasInput);
		} else if (do_start) {
			qWarning("PulseAudio: Starting input: %s", qPrintable(idev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasInput);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			const unsigned int iBlockLen = ((pai->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen;
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			qsInputCache = idev;

			pa_stream_connect_record(pasInput, qPrintable(idev), &buff, PA_STREAM_ADJUST_LATENCY);
		}
	}

	if (raw_ai) {
		QString odev = outputDevice();
		QString edev = qhEchoMap.value(odev);
		pa_stream_state est = pasSpeaker ? pa_stream_get_state(pasSpeaker) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if ((! pai || ! g.s.doEcho()) && (est == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pai && g.s.doEcho()) {
			switch (est) {
				case PA_STREAM_TERMINATED: {
						if (pasSpeaker)
							pa_stream_unref(pasSpeaker);

						pa_sample_spec pss = qhSpecMap.value(edev);
						pa_channel_map pcm = qhChanMap.value(edev);
						if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
							pss.format = PA_SAMPLE_FLOAT32NE;
						if (pss.rate == 0)
							pss.rate = SAMPLE_RATE;
						if ((pss.channels == 0) || (! g.s.bEchoMulti))
							pss.channels = 1;

						pasSpeaker = pa_stream_new(pacContext, mumble_echo, &pss, (pss.channels == 1) ? NULL : &pcm);
						pa_stream_set_state_callback(pasSpeaker, stream_callback, this);
						pa_stream_set_read_callback(pasSpeaker, read_callback, this);
					}
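				// no break: fall through to start the newly created echo stream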
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
						if (g.s.bEchoMulti != bEchoMultiCache) {
							do_stop = true;
						} else if (edev != qsEchoCache) {
							do_stop = true;
						}
						break;
					}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping echo");
			pa_stream_disconnect(pasSpeaker);
		} else if (do_start) {
			qWarning("PulseAudio: Starting echo: %s", qPrintable(edev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasSpeaker);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			const unsigned int iBlockLen = ((pai->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen;
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			bEchoMultiCache = g.s.bEchoMulti;
			qsEchoCache = edev;

			pa_stream_connect_record(pasSpeaker, qPrintable(edev), &buff, PA_STREAM_ADJUST_LATENCY);
		}
	}
}
Example #8
FXbool PulseOutput::configure(const AudioFormat & fmt){
  const pa_sample_spec * config=nullptr;
  pa_operation *operation=nullptr;

  if (!open())
    return false;

  if (stream && fmt==af)
    return true;

  if (stream) {
    pa_stream_disconnect(stream);
    pa_stream_unref(stream);
    stream=nullptr;
    }

  pa_sample_spec spec;
  pa_channel_map cmap;

  if (!to_pulse_format(fmt,spec.format))
    goto failed;

  spec.rate     = fmt.rate;
  spec.channels = fmt.channels;


  // setup channel map
  pa_channel_map_init(&cmap);
  cmap.channels = fmt.channels;
  for (FXint i=0;i<fmt.channels;i++) {
    switch(fmt.channeltype(i)) {
      case Channel::None        : cmap.map[i] = PA_CHANNEL_POSITION_INVALID;      break;
      case Channel::Mono        : cmap.map[i] = PA_CHANNEL_POSITION_MONO;         break;
      case Channel::FrontLeft   : cmap.map[i] = PA_CHANNEL_POSITION_FRONT_LEFT;   break;
      case Channel::FrontRight  : cmap.map[i] = PA_CHANNEL_POSITION_FRONT_RIGHT;  break;
      case Channel::FrontCenter : cmap.map[i] = PA_CHANNEL_POSITION_FRONT_CENTER; break;
      case Channel::BackLeft    : cmap.map[i] = PA_CHANNEL_POSITION_REAR_LEFT;    break;
      case Channel::BackRight   : cmap.map[i] = PA_CHANNEL_POSITION_REAR_RIGHT;   break;
      case Channel::BackCenter  : cmap.map[i] = PA_CHANNEL_POSITION_REAR_CENTER;  break;
      case Channel::SideLeft    : cmap.map[i] = PA_CHANNEL_POSITION_SIDE_LEFT;    break;
      case Channel::SideRight   : cmap.map[i] = PA_CHANNEL_POSITION_SIDE_RIGHT;   break;
      case Channel::LFE         : cmap.map[i] = PA_CHANNEL_POSITION_LFE;          break;
      default: goto failed;
      }
    }

  stream = pa_stream_new(pulse_context,"Goggles Music Manager",&spec,&cmap);
  if (stream == nullptr)
    goto failed;

#ifdef DEBUG
  pa_stream_set_state_callback(stream,stream_state_callback,this);
#endif
  //pa_stream_set_write_callback(stream,stream_write_callback,this);

  if (pa_stream_connect_playback(stream,nullptr,nullptr,PA_STREAM_NOFLAGS,nullptr,nullptr)<0)
    goto failed;

  /// Wait until stream is ready
  pa_stream_state_t state;
  while((state=pa_stream_get_state(stream))!=PA_STREAM_READY) {
    if (state==PA_STREAM_FAILED || state==PA_STREAM_TERMINATED){
      goto failed;
      }
    context->wait_plugin_events();
    }

  /// Get Actual Format
  config = pa_stream_get_sample_spec(stream);
  if (!to_gap_format(config->format,af))
    goto failed;
  af.channels=config->channels;
  af.rate=config->rate;
  af.channelmap=fmt.channelmap;

  /// Get Current Volume
  operation = pa_context_get_sink_input_info(pulse_context,pa_stream_get_index(stream),sink_info_callback,this);
  if (operation) pa_operation_unref(operation);

  return true;
failed:
  GM_DEBUG_PRINT("[pulse] Unsupported pulse configuration:\n");
  fmt.debug();
  return false;
  }
Example #9
static ALCboolean pulse_reset_playback(ALCdevice *device) //{{{
{
    pulse_data *data = device->ExtraData;
    pa_stream_flags_t flags = 0;
    pa_channel_map chanmap;

    pa_threaded_mainloop_lock(data->loop);

    if(!(device->Flags&DEVICE_CHANNELS_REQUEST))
    {
        pa_operation *o;
        o = pa_context_get_sink_info_by_name(data->context, data->device_name, sink_info_callback, device);
        while(pa_operation_get_state(o) == PA_OPERATION_RUNNING)
            pa_threaded_mainloop_wait(data->loop);
        pa_operation_unref(o);
    }
    if(!(device->Flags&DEVICE_FREQUENCY_REQUEST))
        flags |= PA_STREAM_FIX_RATE;

    data->frame_size = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
    data->attr.prebuf = -1;
    data->attr.fragsize = -1;
    data->attr.minreq = device->UpdateSize * data->frame_size;
    data->attr.tlength = data->attr.minreq * maxu(device->NumUpdates, 2);
    data->attr.maxlength = -1;
    flags |= PA_STREAM_EARLY_REQUESTS;
    flags |= PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE;

    switch(device->FmtType)
    {
        case DevFmtByte:
            device->FmtType = DevFmtUByte;
            /* fall-through */
        case DevFmtUByte:
            data->spec.format = PA_SAMPLE_U8;
            break;
        case DevFmtUShort:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            data->spec.format = PA_SAMPLE_S16NE;
            break;
        case DevFmtFloat:
            data->spec.format = PA_SAMPLE_FLOAT32NE;
            break;
    }
    data->spec.rate = device->Frequency;
    data->spec.channels = ChannelsFromDevFmt(device->FmtChans);

    if(pa_sample_spec_valid(&data->spec) == 0)
    {
        ERR("Invalid sample format\n");
        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    if(!pa_channel_map_init_auto(&chanmap, data->spec.channels, PA_CHANNEL_MAP_WAVEEX))
    {
        ERR("Couldn't build map for channel count (%d)!\n", data->spec.channels);
        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }
    SetDefaultWFXChannelOrder(device);

    data->stream = connect_playback_stream(device, flags, &data->attr, &data->spec, &chanmap);
    if(!data->stream)
    {
        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    pa_stream_set_state_callback(data->stream, stream_state_callback2, device);

    data->spec = *(pa_stream_get_sample_spec(data->stream));
    if(device->Frequency != data->spec.rate)
    {
        pa_operation *o;

        if((device->Flags&DEVICE_FREQUENCY_REQUEST))
            ERR("Failed to set frequency %dhz, got %dhz instead\n", device->Frequency, data->spec.rate);
        device->Flags &= ~DEVICE_FREQUENCY_REQUEST;

        /* Server updated our playback rate, so modify the buffer attribs
         * accordingly. */
        data->attr.minreq = (ALuint64)(data->attr.minreq/data->frame_size) *
                            data->spec.rate / device->Frequency * data->frame_size;
        data->attr.tlength = data->attr.minreq * maxu(device->NumUpdates, 2);

        o = pa_stream_set_buffer_attr(data->stream, &data->attr,
                                      stream_success_callback, device);
        while(pa_operation_get_state(o) == PA_OPERATION_RUNNING)
            pa_threaded_mainloop_wait(data->loop);
        pa_operation_unref(o);

        device->Frequency = data->spec.rate;
    }

#if PA_CHECK_VERSION(0,9,15)
    if(pa_stream_set_buffer_attr_callback)
        pa_stream_set_buffer_attr_callback(data->stream, stream_buffer_attr_callback, device);
#endif
    pa_stream_set_moved_callback(data->stream, stream_device_callback, device);
    pa_stream_set_write_callback(data->stream, stream_write_callback, device);
    pa_stream_set_underflow_callback(data->stream, stream_signal_callback, device);

    data->attr = *(pa_stream_get_buffer_attr(data->stream));
    ERR("PulseAudio returned minreq=%d, tlength=%d\n", data->attr.minreq, data->attr.tlength);
    device->UpdateSize = data->attr.minreq / data->frame_size;
    device->NumUpdates = (data->attr.tlength/data->frame_size) / device->UpdateSize;
    while(device->NumUpdates <= 2)
    {
        pa_operation *o;

        ERR("minreq too high - expect lag or break up\n");

        /* Server gave a comparatively large minreq, so modify the tlength. */
        device->NumUpdates = 2;
        data->attr.tlength = data->attr.minreq * device->NumUpdates;

        o = pa_stream_set_buffer_attr(data->stream, &data->attr,
                                      stream_success_callback, device);

        while(pa_operation_get_state(o) == PA_OPERATION_RUNNING)
            pa_threaded_mainloop_wait(data->loop);
        pa_operation_unref(o);

        data->attr = *(pa_stream_get_buffer_attr(data->stream));
        ERR("PulseAudio returned minreq=%d, tlength=%d\n", data->attr.minreq, data->attr.tlength);
        device->UpdateSize = data->attr.minreq / data->frame_size;
        device->NumUpdates = (data->attr.tlength/data->frame_size) / device->UpdateSize;
    }


    data->thread = StartThread(PulseProc, device);
    if(!data->thread)
    {
#if PA_CHECK_VERSION(0,9,15)
        if(pa_stream_set_buffer_attr_callback)
            pa_stream_set_buffer_attr_callback(data->stream, NULL, NULL);
#endif
        pa_stream_set_moved_callback(data->stream, NULL, NULL);
        pa_stream_set_write_callback(data->stream, NULL, NULL);
        pa_stream_set_underflow_callback(data->stream, NULL, NULL);
        pa_stream_disconnect(data->stream);
        pa_stream_unref(data->stream);
        data->stream = NULL;

        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    pa_threaded_mainloop_unlock(data->loop);
    return ALC_TRUE;
} //}}}
Example #10
File: pulse.c Project: Kafay/vlc
/*****************************************************************************
 * Open: open the audio device
 *****************************************************************************/
static int Open ( vlc_object_t *p_this )
{
    aout_instance_t *p_aout = (aout_instance_t *)p_this;
    struct aout_sys_t * p_sys;
    struct pa_sample_spec ss;
    const struct pa_buffer_attr *buffer_attr;
    struct pa_buffer_attr a;
    struct pa_channel_map map;

    /* Allocate structures */
    p_aout->output.p_sys = p_sys = calloc( 1, sizeof( aout_sys_t ) );
    if( p_sys == NULL )
        return VLC_ENOMEM;

    PULSE_DEBUG( "Pulse start initialization");

    ss.channels = aout_FormatNbChannels( &p_aout->output.output ); /* Get the input stream channel count */

    /* Setup the pulse audio stream based on the input stream count */
    switch(ss.channels)
    {
        case 8:
            p_aout->output.output.i_physical_channels
                = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT | AOUT_CHAN_CENTER
                | AOUT_CHAN_MIDDLELEFT | AOUT_CHAN_MIDDLERIGHT
                | AOUT_CHAN_REARLEFT | AOUT_CHAN_REARRIGHT
                | AOUT_CHAN_LFE;
            break;
        case 6:
            p_aout->output.output.i_physical_channels
                = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT | AOUT_CHAN_CENTER
                | AOUT_CHAN_REARLEFT | AOUT_CHAN_REARRIGHT
                | AOUT_CHAN_LFE;
            break;

        case 4:
            p_aout->output.output.i_physical_channels
                = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT
                | AOUT_CHAN_REARLEFT | AOUT_CHAN_REARRIGHT;
            break;

        case 2:
            p_aout->output.output.i_physical_channels
                = AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT;
            break;

        case 1:
            p_aout->output.output.i_physical_channels = AOUT_CHAN_CENTER;
            break;

        default:
            msg_Err(p_aout,"Invalid number of channels");
        goto fail;
    }

    /* Add a quick command line info message */
    msg_Info(p_aout, "No. of Audio Channels: %d", ss.channels);

    ss.rate = p_aout->output.output.i_rate;
    ss.format = PA_SAMPLE_FLOAT32NE;
    p_aout->output.output.i_format = VLC_CODEC_FL32;

    if (!pa_sample_spec_valid(&ss)) {
        msg_Err(p_aout,"Invalid sample spec");
        goto fail;
    }

    /* Reduce overall latency to 200 ms to reduce audible clicks.
     * PulseAudio's minreq and internal buffers are now 20 ms, which reduces resampling.
     */
    a.tlength = pa_bytes_per_second(&ss)/5;
    a.maxlength = a.tlength * 2;
    a.prebuf = a.tlength / 2;
    a.minreq = a.tlength / 10;

    /* Buffer size is 20 ms */
    p_sys->buffer_size = a.minreq;

    /* Initialise the speaker map setup above */
    pa_channel_map_init_auto(&map, ss.channels, PA_CHANNEL_MAP_ALSA);

    if (!(p_sys->mainloop = pa_threaded_mainloop_new())) {
        msg_Err(p_aout, "Failed to allocate main loop");
        goto fail;
    }

    if (!(p_sys->context = pa_context_new(pa_threaded_mainloop_get_api(p_sys->mainloop), _( PULSE_CLIENT_NAME )))) {
        msg_Err(p_aout, "Failed to allocate context");
        goto fail;
    }

    pa_context_set_state_callback(p_sys->context, context_state_cb, p_aout);

    PULSE_DEBUG( "Pulse before context connect");

    if (pa_context_connect(p_sys->context, NULL, 0, NULL) < 0) {
        msg_Err(p_aout, "Failed to connect to server: %s", pa_strerror(pa_context_errno(p_sys->context)));
        goto fail;
    }

    PULSE_DEBUG( "Pulse after context connect");

    pa_threaded_mainloop_lock(p_sys->mainloop);

    if (pa_threaded_mainloop_start(p_sys->mainloop) < 0) {
        msg_Err(p_aout, "Failed to start main loop");
        goto unlock_and_fail;
    }

    msg_Dbg(p_aout, "Pulse mainloop started");

    /* Wait until the context is ready */
    pa_threaded_mainloop_wait(p_sys->mainloop);

    if (pa_context_get_state(p_sys->context) != PA_CONTEXT_READY) {
        msg_Err(p_aout, "Failed to connect to server: %s", pa_strerror(pa_context_errno(p_sys->context)));
        goto unlock_and_fail;
    }

    if (!(p_sys->stream = pa_stream_new(p_sys->context, "audio stream", &ss, &map))) {
        msg_Err(p_aout, "Failed to create stream: %s", pa_strerror(pa_context_errno(p_sys->context)));
        goto unlock_and_fail;
    }

    PULSE_DEBUG( "Pulse after new stream");

    pa_stream_set_state_callback(p_sys->stream, stream_state_cb, p_aout);
    pa_stream_set_write_callback(p_sys->stream, stream_request_cb, p_aout);
    pa_stream_set_latency_update_callback(p_sys->stream, stream_latency_update_cb, p_aout);

    if (pa_stream_connect_playback(p_sys->stream, NULL, &a, PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_AUTO_TIMING_UPDATE|PA_STREAM_ADJUST_LATENCY, NULL, NULL) < 0) {
        msg_Err(p_aout, "Failed to connect stream: %s", pa_strerror(pa_context_errno(p_sys->context)));
        goto unlock_and_fail;
    }

    PULSE_DEBUG("Pulse stream connect");

    /* Wait until the stream is ready */
    pa_threaded_mainloop_wait(p_sys->mainloop);

    msg_Dbg(p_aout,"Pulse stream connected");

    if (pa_stream_get_state(p_sys->stream) != PA_STREAM_READY) {
        msg_Err(p_aout, "Failed to connect to server: %s", pa_strerror(pa_context_errno(p_sys->context)));
        goto unlock_and_fail;
    }


    PULSE_DEBUG("Pulse after stream get status");

    pa_threaded_mainloop_unlock(p_sys->mainloop);

    buffer_attr = pa_stream_get_buffer_attr(p_sys->stream);
    p_aout->output.i_nb_samples = buffer_attr->minreq / pa_frame_size(&ss);
    p_aout->output.pf_play = Play;
    aout_VolumeSoftInit(p_aout);
    msg_Dbg(p_aout, "Pulse initialized successfully");
    {
        char cmt[PA_CHANNEL_MAP_SNPRINT_MAX], sst[PA_SAMPLE_SPEC_SNPRINT_MAX];

        msg_Dbg(p_aout, "Buffer metrics: maxlength=%u, tlength=%u, prebuf=%u, minreq=%u", buffer_attr->maxlength, buffer_attr->tlength, buffer_attr->prebuf, buffer_attr->minreq);
        msg_Dbg(p_aout, "Using sample spec '%s', channel map '%s'.",
                pa_sample_spec_snprint(sst, sizeof(sst), pa_stream_get_sample_spec(p_sys->stream)),
                pa_channel_map_snprint(cmt, sizeof(cmt), pa_stream_get_channel_map(p_sys->stream)));

            msg_Dbg(p_aout, "Connected to device %s (%u, %ssuspended).",
                        pa_stream_get_device_name(p_sys->stream),
                        pa_stream_get_device_index(p_sys->stream),
                        pa_stream_is_suspended(p_sys->stream) ? "" : "not ");
    }

    return VLC_SUCCESS;

unlock_and_fail:
    msg_Dbg(p_aout, "Pulse initialization unlock and fail");

    if (p_sys->mainloop)
        pa_threaded_mainloop_unlock(p_sys->mainloop);
fail:
    msg_Err(p_aout, "Pulse initialization failed");
    uninit(p_aout);
    return VLC_EGENERIC;
}
Example #11
static ALCboolean pulse_reset_playback(ALCdevice *device)
{
    pulse_data *data = device->ExtraData;
    pa_stream_flags_t flags = 0;
    pa_channel_map chanmap;

    pa_threaded_mainloop_lock(data->loop);

    if(data->stream)
    {
#if PA_CHECK_VERSION(0,9,15)
        if(pa_stream_set_buffer_attr_callback)
            pa_stream_set_buffer_attr_callback(data->stream, NULL, NULL);
#endif
        pa_stream_disconnect(data->stream);
        pa_stream_unref(data->stream);
        data->stream = NULL;
    }

    if(!(device->Flags&DEVICE_CHANNELS_REQUEST))
    {
        pa_operation *o;
        o = pa_context_get_sink_info_by_name(data->context, data->device_name, sink_info_callback, device);
        WAIT_FOR_OPERATION(o, data->loop);
    }
    if(!(device->Flags&DEVICE_FREQUENCY_REQUEST))
        flags |= PA_STREAM_FIX_RATE;

    flags |= PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE;
    flags |= PA_STREAM_ADJUST_LATENCY;
    flags |= PA_STREAM_START_CORKED;
    flags |= PA_STREAM_DONT_MOVE;

    switch(device->FmtType)
    {
        case DevFmtByte:
            device->FmtType = DevFmtUByte;
            /* fall-through */
        case DevFmtUByte:
            data->spec.format = PA_SAMPLE_U8;
            break;
        case DevFmtUShort:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            data->spec.format = PA_SAMPLE_S16NE;
            break;
        case DevFmtUInt:
            device->FmtType = DevFmtInt;
            /* fall-through */
        case DevFmtInt:
            data->spec.format = PA_SAMPLE_S32NE;
            break;
        case DevFmtFloat:
            data->spec.format = PA_SAMPLE_FLOAT32NE;
            break;
    }
    data->spec.rate = device->Frequency;
    data->spec.channels = ChannelsFromDevFmt(device->FmtChans);

    if(pa_sample_spec_valid(&data->spec) == 0)
    {
        ERR("Invalid sample format\n");
        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    if(!pa_channel_map_init_auto(&chanmap, data->spec.channels, PA_CHANNEL_MAP_WAVEEX))
    {
        ERR("Couldn't build map for channel count (%d)!\n", data->spec.channels);
        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }
    SetDefaultWFXChannelOrder(device);

    data->attr.fragsize = -1;
    data->attr.prebuf = 0;
    data->attr.minreq = device->UpdateSize * pa_frame_size(&data->spec);
    data->attr.tlength = data->attr.minreq * maxu(device->NumUpdates, 2);
    data->attr.maxlength = -1;

    data->stream = connect_playback_stream(data->device_name, data->loop,
                                           data->context, flags, &data->attr,
                                           &data->spec, &chanmap);
    if(!data->stream)
    {
        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }
    pa_stream_set_state_callback(data->stream, stream_state_callback2, device);

    data->spec = *(pa_stream_get_sample_spec(data->stream));
    if(device->Frequency != data->spec.rate)
    {
        pa_operation *o;

        /* Server updated our playback rate, so modify the buffer attribs
         * accordingly. */
        data->attr.minreq = (ALuint64)device->UpdateSize * data->spec.rate /
                            device->Frequency * pa_frame_size(&data->spec);
        data->attr.tlength = data->attr.minreq * maxu(device->NumUpdates, 2);
        data->attr.prebuf = 0;

        o = pa_stream_set_buffer_attr(data->stream, &data->attr,
                                      stream_success_callback, device);
        WAIT_FOR_OPERATION(o, data->loop);

        device->Frequency = data->spec.rate;
    }

#if PA_CHECK_VERSION(0,9,15)
    if(pa_stream_set_buffer_attr_callback)
        pa_stream_set_buffer_attr_callback(data->stream, stream_buffer_attr_callback, device);
#endif
    stream_buffer_attr_callback(data->stream, device);
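    /* Invoke the attr callback once by hand, presumably to refresh data->attr
     * with the stream's actual buffer attributes before the size math below. */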

    device->NumUpdates = device->UpdateSize*device->NumUpdates /
                         (data->attr.minreq/pa_frame_size(&data->spec));
    device->NumUpdates = maxu(device->NumUpdates, 2);
    device->UpdateSize = data->attr.minreq / pa_frame_size(&data->spec);

    pa_threaded_mainloop_unlock(data->loop);
    return ALC_TRUE;
}
Example #12
static int
pulseaudio_check_passthru(audio_decoder_t *ad, int codec_id)
{
  decoder_t *d = (decoder_t *)ad;

  int e;

  switch(codec_id) {
  case AV_CODEC_ID_DTS:
    e = PA_ENCODING_DTS_IEC61937;
    break;
  case AV_CODEC_ID_AC3:
    e = PA_ENCODING_AC3_IEC61937;
    break;
  case AV_CODEC_ID_EAC3:
    e = PA_ENCODING_EAC3_IEC61937;
    break;
#if 0
  case AV_CODEC_ID_MP1:
  case AV_CODEC_ID_MP2:
  case AV_CODEC_ID_MP3:
    e = PA_ENCODING_MPEG_IEC61937;
    break;
#endif
  default:
    return 0;
  }

  pa_threaded_mainloop_lock(mainloop);

  if(pulseaudio_make_context_ready()) {
    pa_threaded_mainloop_unlock(mainloop);
    return 0;
  }

  if(d->s) {
    pa_stream_disconnect(d->s);
    pa_stream_unref(d->s);
  }

  pa_format_info *vec[1];

  vec[0] = pa_format_info_new();

  vec[0]->encoding = e;
  pa_format_info_set_rate(vec[0], 48000);
  pa_format_info_set_channels(vec[0], 2);

  d->s = pa_stream_new_extended(ctx, "Showtime playback", vec, 1, NULL);
  int flags = 0;

  pa_stream_set_state_callback(d->s, stream_state_callback, d);
  pa_stream_set_write_callback(d->s, stream_write_callback, d);

  flags |= PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_INTERPOLATE_TIMING | 
    PA_STREAM_NOT_MONOTONIC;

  pa_stream_connect_playback(d->s, NULL, NULL, flags, NULL, NULL);

  while(1) {
    switch(pa_stream_get_state(d->s)) {
    case PA_STREAM_UNCONNECTED:
    case PA_STREAM_CREATING:
      pa_threaded_mainloop_wait(mainloop);
      continue;

    case PA_STREAM_READY:
      pa_threaded_mainloop_unlock(mainloop);
      ad->ad_tile_size = 512;
      d->ss = *pa_stream_get_sample_spec(d->s);
      return 1;

    case PA_STREAM_TERMINATED:
    case PA_STREAM_FAILED:
      pa_threaded_mainloop_unlock(mainloop);
      return 0;
    }
  }
}
Example #13
static int Open(vlc_object_t *obj)
{
    demux_t *demux = (demux_t *)obj;

    demux_sys_t *sys = malloc(sizeof (*sys));
    if (unlikely(sys == NULL))
        return VLC_ENOMEM;

    sys->context = vlc_pa_connect(obj, &sys->mainloop);
    if (sys->context == NULL) {
        free(sys);
        return VLC_EGENERIC;
    }

    sys->stream = NULL;
    sys->es = NULL;
    sys->discontinuity = false;
    sys->caching = INT64_C(1000) * var_InheritInteger(obj, "live-caching");
    demux->p_sys = sys;

    /* Stream parameters */
    struct pa_sample_spec ss;
    ss.format = PA_SAMPLE_S16NE;
    ss.rate = 48000;
    ss.channels = 2;
    assert(pa_sample_spec_valid(&ss));

    struct pa_channel_map map;
    map.channels = 2;
    map.map[0] = PA_CHANNEL_POSITION_FRONT_LEFT;
    map.map[1] = PA_CHANNEL_POSITION_FRONT_RIGHT;
    assert(pa_channel_map_valid(&map));

    const pa_stream_flags_t flags = PA_STREAM_INTERPOLATE_TIMING
                                  | PA_STREAM_AUTO_TIMING_UPDATE
                                  | PA_STREAM_ADJUST_LATENCY
                                  | PA_STREAM_FIX_FORMAT
                                  | PA_STREAM_FIX_RATE
                                  /*| PA_STREAM_FIX_CHANNELS*/;

    const char *dev = NULL;
    if (demux->psz_location != NULL && demux->psz_location[0] != '\0')
        dev = demux->psz_location;

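    /* fragsize: half of the configured caching interval, converted to bytes
     * for the requested sample spec. */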
    struct pa_buffer_attr attr = {
        .maxlength = -1,
        .fragsize = pa_usec_to_bytes(sys->caching, &ss) / 2,
    };

    es_format_t fmt;

    /* Create record stream */
    pa_stream *s;
    pa_operation *op;

    pa_threaded_mainloop_lock(sys->mainloop);
    s = pa_stream_new(sys->context, "audio stream", &ss, &map);
    if (s == NULL)
        goto error;

    sys->stream = s;
    pa_stream_set_state_callback(s, stream_state_cb, sys->mainloop);
    pa_stream_set_read_callback(s, stream_read_cb, demux);
    pa_stream_set_buffer_attr_callback(s, stream_buffer_attr_cb, demux);
    pa_stream_set_moved_callback(s, stream_moved_cb, demux);
    pa_stream_set_overflow_callback(s, stream_overflow_cb, demux);
    pa_stream_set_started_callback(s, stream_started_cb, demux);
    pa_stream_set_suspended_callback(s, stream_suspended_cb, demux);
    pa_stream_set_underflow_callback(s, stream_underflow_cb, demux);

    if (pa_stream_connect_record(s, dev, &attr, flags) < 0
     || stream_wait(s, sys->mainloop)) {
        vlc_pa_error(obj, "cannot connect record stream", sys->context);
        goto error;
    }

    /* The ES should be initialized before stream_read_cb(), but how? */
    const struct pa_sample_spec *pss = pa_stream_get_sample_spec(s);
    if ((unsigned)pss->format >= sizeof (fourccs) / sizeof (fourccs[0])) {
        msg_Err(obj, "unknown PulseAudio sample format %u",
                (unsigned)pss->format);
        goto error;
    }

    vlc_fourcc_t format = fourccs[pss->format];
    if (format == 0) { /* FIXME: should renegotiate something else */
        msg_Err(obj, "unsupported PulseAudio sample format %u",
                (unsigned)pss->format);
        goto error;
    }

    es_format_Init(&fmt, AUDIO_ES, format);
    fmt.audio.i_physical_channels = fmt.audio.i_original_channels =
        AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT;
    fmt.audio.i_channels = ss.channels;
    fmt.audio.i_rate = pss->rate;
    fmt.audio.i_bitspersample = aout_BitsPerSample(format);
    fmt.audio.i_blockalign = fmt.audio.i_bitspersample * ss.channels / 8;
    fmt.i_bitrate = fmt.audio.i_bitspersample * ss.channels * pss->rate;
    sys->framesize = fmt.audio.i_blockalign;
    sys->es = es_out_Add (demux->out, &fmt);

    /* Update the buffer attributes according to actual format */
    attr.fragsize = pa_usec_to_bytes(sys->caching, pss) / 2;
    op = pa_stream_set_buffer_attr(s, &attr, stream_success_cb, sys->mainloop);
    if (likely(op != NULL)) {
        while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
            pa_threaded_mainloop_wait(sys->mainloop);
        pa_operation_unref(op);
    }
    stream_buffer_attr_cb(s, demux);
    pa_threaded_mainloop_unlock(sys->mainloop);

    demux->pf_demux = NULL;
    demux->pf_control = Control;
    return VLC_SUCCESS;

error:
    pa_threaded_mainloop_unlock(sys->mainloop);
    Close(obj);
    return VLC_EGENERIC;
}

static void Close (vlc_object_t *obj)
{
    demux_t *demux = (demux_t *)obj;
    demux_sys_t *sys = demux->p_sys;
    pa_stream *s = sys->stream;

    if (likely(s != NULL)) {
        pa_threaded_mainloop_lock(sys->mainloop);
        pa_stream_disconnect(s);
        pa_stream_set_state_callback(s, NULL, NULL);
        pa_stream_set_read_callback(s, NULL, NULL);
        pa_stream_set_buffer_attr_callback(s, NULL, NULL);
        pa_stream_set_moved_callback(s, NULL, NULL);
        pa_stream_set_overflow_callback(s, NULL, NULL);
        pa_stream_set_started_callback(s, NULL, NULL);
        pa_stream_set_suspended_callback(s, NULL, NULL);
        pa_stream_set_underflow_callback(s, NULL, NULL);
        pa_stream_unref(s);
        pa_threaded_mainloop_unlock(sys->mainloop);
    }

    vlc_pa_disconnect(obj, sys->context, sys->mainloop);
    free(sys);
}