Example #1
static gboolean
gst_audio_aggregator_src_query (GstAggregator * agg, GstQuery * query)
{
  GstAudioAggregator *aagg = GST_AUDIO_AGGREGATOR (agg);
  gboolean res = FALSE;

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_DURATION:
      res = gst_audio_aggregator_query_duration (aagg, query);
      break;
    case GST_QUERY_POSITION:
    {
      GstFormat format;

      gst_query_parse_position (query, &format, NULL);

      GST_OBJECT_LOCK (aagg);

      switch (format) {
        case GST_FORMAT_TIME:
          gst_query_set_position (query, format,
              gst_segment_to_stream_time (&agg->segment, GST_FORMAT_TIME,
                  agg->segment.position));
          res = TRUE;
          break;
        case GST_FORMAT_BYTES:
          if (GST_AUDIO_INFO_BPF (&aagg->info)) {
            gst_query_set_position (query, format, aagg->priv->offset *
                GST_AUDIO_INFO_BPF (&aagg->info));
            res = TRUE;
          }
          break;
        case GST_FORMAT_DEFAULT:
          gst_query_set_position (query, format, aagg->priv->offset);
          res = TRUE;
          break;
        default:
          break;
      }

      GST_OBJECT_UNLOCK (aagg);

      break;
    }
    default:
      res =
          GST_AGGREGATOR_CLASS (gst_audio_aggregator_parent_class)->src_query
          (agg, query);
      break;
  }

  return res;
}
Example #2
static void
gst_ladspa_source_type_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstLADSPASource *ladspa = GST_LADSPA_SOURCE (object);

  switch (prop_id) {
    case GST_LADSPA_SOURCE_PROP_SAMPLES_PER_BUFFER:
      ladspa->samples_per_buffer = g_value_get_int (value);
      gst_base_src_set_blocksize (GST_BASE_SRC (ladspa),
          GST_AUDIO_INFO_BPF (&ladspa->info) * ladspa->samples_per_buffer);
      break;
    case GST_LADSPA_SOURCE_PROP_IS_LIVE:
      gst_base_src_set_live (GST_BASE_SRC (ladspa),
          g_value_get_boolean (value));
      break;
    case GST_LADSPA_SOURCE_PROP_TIMESTAMP_OFFSET:
      ladspa->timestamp_offset = g_value_get_int64 (value);
      break;
    case GST_LADSPA_SOURCE_PROP_CAN_ACTIVATE_PUSH:
      GST_BASE_SRC (ladspa)->can_activate_push = g_value_get_boolean (value);
      break;
    case GST_LADSPA_SOURCE_PROP_CAN_ACTIVATE_PULL:
      ladspa->can_activate_pull = g_value_get_boolean (value);
      break;
    default:
      gst_ladspa_object_set_property (&ladspa->ladspa, object, prop_id, value,
          pspec);
      break;
  }
}
Example #3
static gboolean
gst_lv2_source_set_caps (GstBaseSrc * base, GstCaps * caps)
{
  GstLV2Source *lv2 = (GstLV2Source *) base;
  GstAudioInfo info;

  if (!gst_audio_info_from_caps (&info, caps)) {
    GST_ERROR_OBJECT (base, "received invalid caps");
    return FALSE;
  }

  GST_DEBUG_OBJECT (lv2, "negotiated to caps %" GST_PTR_FORMAT, caps);

  lv2->info = info;

  gst_base_src_set_blocksize (base,
      GST_AUDIO_INFO_BPF (&info) * lv2->samples_per_buffer);

  if (!gst_lv2_setup (&lv2->lv2, GST_AUDIO_INFO_RATE (&info)))
    goto no_instance;

  return TRUE;

no_instance:
  {
    GST_ERROR_OBJECT (lv2, "could not create instance");
    return FALSE;
  }
}
Example #4
/* GObject vmethods implementation */
static void
gst_lv2_source_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstLV2Source *self = (GstLV2Source *) object;

  switch (prop_id) {
    case GST_LV2_SOURCE_PROP_SAMPLES_PER_BUFFER:
      self->samples_per_buffer = g_value_get_int (value);
      gst_base_src_set_blocksize (GST_BASE_SRC (self),
          GST_AUDIO_INFO_BPF (&self->info) * self->samples_per_buffer);
      break;
    case GST_LV2_SOURCE_PROP_IS_LIVE:
      gst_base_src_set_live (GST_BASE_SRC (self), g_value_get_boolean (value));
      break;
    case GST_LV2_SOURCE_PROP_TIMESTAMP_OFFSET:
      self->timestamp_offset = g_value_get_int64 (value);
      break;
    case GST_LV2_SOURCE_PROP_CAN_ACTIVATE_PUSH:
      GST_BASE_SRC (self)->can_activate_push = g_value_get_boolean (value);
      break;
    case GST_LV2_SOURCE_PROP_CAN_ACTIVATE_PULL:
      self->can_activate_pull = g_value_get_boolean (value);
      break;
    default:
      gst_lv2_object_set_property (&self->lv2, object, prop_id, value, pspec);
      break;
  }
}
Example #5
static gboolean
gst_audio_test_src_setcaps (GstBaseSrc * basesrc, GstCaps * caps)
{
  GstAudioTestSrc *src = GST_AUDIO_TEST_SRC (basesrc);
  GstAudioInfo info;

  if (!gst_audio_info_from_caps (&info, caps))
    goto invalid_caps;

  GST_DEBUG_OBJECT (src, "negotiated to caps %" GST_PTR_FORMAT, caps);

  src->info = info;

  gst_base_src_set_blocksize (basesrc,
      GST_AUDIO_INFO_BPF (&info) * src->samples_per_buffer);
  gst_audio_test_src_change_wave (src);

  return TRUE;

  /* ERROR */
invalid_caps:
  {
    GST_ERROR_OBJECT (basesrc, "received invalid caps");
    return FALSE;
  }
}
Example #6
static gboolean
gst_audio_fx_base_fir_filter_transform_size (GstBaseTransform * base,
    GstPadDirection direction, GstCaps * caps, gsize size, GstCaps * othercaps,
    gsize * othersize)
{
  GstAudioFXBaseFIRFilter *self = GST_AUDIO_FX_BASE_FIR_FILTER (base);
  guint blocklen;
  GstAudioInfo info;
  gint bpf;

  if (!self->fft || self->low_latency || direction == GST_PAD_SRC) {
    *othersize = size;
    return TRUE;
  }

  if (!gst_audio_info_from_caps (&info, caps))
    return FALSE;

  bpf = GST_AUDIO_INFO_BPF (&info);

  size /= bpf;
  blocklen = self->block_length - self->kernel_length + 1;
  *othersize = ((size + blocklen - 1) / blocklen) * blocklen;
  *othersize *= bpf;

  return TRUE;
}
Example #7
/* seek to time, will be called when we operate in push mode. In pull mode we
 * get the requested byte offset. */
static gboolean
gst_lv2_source_do_seek (GstBaseSrc * base, GstSegment * segment)
{
  GstLV2Source *lv2 = (GstLV2Source *) base;
  GstClockTime time;
  gint samplerate, bpf;
  gint64 next_sample;

  GST_DEBUG_OBJECT (lv2, "seeking %" GST_SEGMENT_FORMAT, segment);

  time = segment->position;
  lv2->reverse = (segment->rate < 0.0);

  samplerate = GST_AUDIO_INFO_RATE (&lv2->info);
  bpf = GST_AUDIO_INFO_BPF (&lv2->info);

  /* now move to the time indicated, don't seek to the sample *after* the time */
  next_sample = gst_util_uint64_scale_int (time, samplerate, GST_SECOND);
  lv2->next_byte = next_sample * bpf;
  if (samplerate == 0)
    lv2->next_time = 0;
  else
    lv2->next_time =
        gst_util_uint64_scale_round (next_sample, GST_SECOND, samplerate);

  GST_DEBUG_OBJECT (lv2, "seeking next_sample=%" G_GINT64_FORMAT
      " next_time=%" GST_TIME_FORMAT, next_sample,
      GST_TIME_ARGS (lv2->next_time));

  g_assert (lv2->next_time <= time);

  lv2->next_sample = next_sample;

  if (!lv2->reverse) {
    if (GST_CLOCK_TIME_IS_VALID (segment->start)) {
      segment->time = segment->start;
    }
  } else {
    if (GST_CLOCK_TIME_IS_VALID (segment->stop)) {
      segment->time = segment->stop;
    }
  }

  if (GST_CLOCK_TIME_IS_VALID (segment->stop)) {
    time = segment->stop;
    lv2->sample_stop =
        gst_util_uint64_scale_round (time, samplerate, GST_SECOND);
    lv2->check_seek_stop = TRUE;
  } else {
    lv2->check_seek_stop = FALSE;
  }
  lv2->eos_reached = FALSE;

  return TRUE;
}
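
The seek handler above maps a requested time to a sample offset, a byte offset, and back to a (possibly rounded) time. A minimal sketch of that conversion with hypothetical numbers (a 1.5 s seek at 44100 Hz with 4 bytes per frame; this sketch is not part of the example):

#include <gst/gst.h>

static void
seek_conversion_sketch (void)
{
  GstClockTime time = 1500 * GST_MSECOND;       /* 1.5 s */
  gint samplerate = 44100, bpf = 4;             /* hypothetical format */
  gint64 next_sample, next_byte;
  GstClockTime next_time;

  next_sample = gst_util_uint64_scale_int (time, samplerate, GST_SECOND);
  next_byte = next_sample * bpf;
  next_time = gst_util_uint64_scale_round (next_sample, GST_SECOND, samplerate);

  /* next_sample = 66150 frames, next_byte = 264600 bytes,
   * next_time = 1500000000 ns, i.e. exactly 1.5 s again */
  g_assert (next_sample == 66150 && next_byte == 264600);
  g_assert (next_time == time);
}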
Example #8
static gboolean
gst_alsasrc_prepare (GstAudioSrc * asrc, GstAudioRingBufferSpec * spec)
{
  GstAlsaSrc *alsa;
  gint err;

  alsa = GST_ALSA_SRC (asrc);

  if (!alsasrc_parse_spec (alsa, spec))
    goto spec_parse;

  CHECK (snd_pcm_nonblock (alsa->handle, 0), non_block);

  CHECK (set_hwparams (alsa), hw_params_failed);
  CHECK (set_swparams (alsa), sw_params_failed);
  CHECK (snd_pcm_prepare (alsa->handle), prepare_failed);

  alsa->bpf = GST_AUDIO_INFO_BPF (&spec->info);
  spec->segsize = alsa->period_size * alsa->bpf;
  spec->segtotal = alsa->buffer_size / alsa->period_size;

  return TRUE;

  /* ERRORS */
spec_parse:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Error parsing spec"));
    return FALSE;
  }
non_block:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Could not set device to blocking: %s", snd_strerror (err)));
    return FALSE;
  }
hw_params_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Setting of hwparams failed: %s", snd_strerror (err)));
    return FALSE;
  }
sw_params_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Setting of swparams failed: %s", snd_strerror (err)));
    return FALSE;
  }
prepare_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Prepare failed: %s", snd_strerror (err)));
    return FALSE;
  }
}
Example #9
static gboolean
gst_decklink_audio_sink_ringbuffer_acquire (GstAudioRingBuffer * rb,
    GstAudioRingBufferSpec * spec)
{
  GstDecklinkAudioSinkRingBuffer *self =
      GST_DECKLINK_AUDIO_SINK_RING_BUFFER_CAST (rb);
  HRESULT ret;
  BMDAudioSampleType sample_depth;

  GST_DEBUG_OBJECT (self->sink, "Acquire");

  if (spec->info.finfo->format == GST_AUDIO_FORMAT_S16LE) {
    sample_depth = bmdAudioSampleType16bitInteger;
  } else {
    sample_depth = bmdAudioSampleType32bitInteger;
  }

  ret = self->output->output->EnableAudioOutput (bmdAudioSampleRate48kHz,
      sample_depth, 2, bmdAudioOutputStreamContinuous);
  if (ret != S_OK) {
    GST_WARNING_OBJECT (self->sink, "Failed to enable audio output 0x%08x",
        ret);
    return FALSE;
  }

  g_mutex_lock (&self->output->lock);
  self->output->audio_enabled = TRUE;
  if (self->output->start_scheduled_playback)
    self->output->start_scheduled_playback (self->output->videosink);
  g_mutex_unlock (&self->output->lock);

  ret =
      self->output->
      output->SetAudioCallback (new GStreamerAudioOutputCallback (self));
  if (ret != S_OK) {
    GST_WARNING_OBJECT (self->sink,
        "Failed to set audio output callback 0x%08x", ret);
    return FALSE;
  }

  spec->segsize =
      (spec->latency_time * GST_AUDIO_INFO_RATE (&spec->info) /
      G_USEC_PER_SEC) * GST_AUDIO_INFO_BPF (&spec->info);
  spec->segtotal = spec->buffer_time / spec->latency_time;
  // set latency to one more segment as we need some headroom
  spec->seglatency = spec->segtotal + 1;

  rb->size = spec->segtotal * spec->segsize;
  rb->memory = (guint8 *) g_malloc0 (rb->size);

  return TRUE;
}
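
In the Decklink ring buffer above, segsize and segtotal come straight from latency_time, buffer_time and the audio format. A hedged worked sketch with hypothetical numbers (10 ms latency, 200 ms buffer, 48 kHz S16 stereo, so bpf = 4):

#include <glib.h>

static void
decklink_segment_sketch (void)
{
  guint64 latency_time = 10000, buffer_time = 200000;       /* microseconds */
  gint rate = 48000, bpf = 4;                               /* 48 kHz S16 stereo */
  guint segsize, segtotal;

  segsize = (latency_time * rate / G_USEC_PER_SEC) * bpf;   /* 480 frames -> 1920 bytes */
  segtotal = buffer_time / latency_time;                    /* 20 segments */

  /* with one extra segment of headroom (seglatency = 21), the ring buffer
   * itself holds segtotal * segsize = 38400 bytes */
  g_assert (segsize == 1920 && segtotal == 20);
}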
Example #10
static GstBuffer *
gst_audio_aggregator_create_output_buffer (GstAudioAggregator * aagg,
    guint num_frames)
{
  GstBuffer *outbuf = gst_buffer_new_allocate (NULL, num_frames *
      GST_AUDIO_INFO_BPF (&aagg->info), NULL);
  GstMapInfo outmap;

  gst_buffer_map (outbuf, &outmap, GST_MAP_WRITE);
  gst_audio_format_fill_silence (aagg->info.finfo, outmap.data, outmap.size);
  gst_buffer_unmap (outbuf, &outmap);

  return outbuf;
}
Example #11
static gboolean
gst_bml_transform_get_unit_size (GstBaseTransform * base, GstCaps * caps,
    gsize * size)
{
  GstAudioInfo info;

  g_assert (size);

  if (!gst_audio_info_from_caps (&info, caps))
    return FALSE;

  *size = GST_AUDIO_INFO_BPF (&info);

  return TRUE;
}
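
All of these examples rely on the same fact: GST_AUDIO_INFO_BPF is the number of bytes per frame, i.e. the sample size multiplied by the channel count. A self-contained sketch, not taken from any of the examples, that illustrates the macro:

#include <gst/audio/audio.h>

static void
bpf_sketch (void)
{
  GstAudioInfo info;

  gst_audio_info_init (&info);
  gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16, 44100, 2, NULL);

  /* 2 bytes per S16 sample * 2 channels = 4 bytes per frame */
  g_assert (GST_AUDIO_INFO_BPF (&info) == 4);
}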
Example #12
static GstBuffer *
gst_audio_aggregator_do_clip (GstAggregator * agg,
    GstAggregatorPad * bpad, GstBuffer * buffer)
{
  GstAudioAggregatorPad *pad = GST_AUDIO_AGGREGATOR_PAD (bpad);
  gint rate, bpf;

  rate = GST_AUDIO_INFO_RATE (&pad->info);
  bpf = GST_AUDIO_INFO_BPF (&pad->info);

  GST_OBJECT_LOCK (bpad);
  buffer = gst_audio_buffer_clip (buffer, &bpad->segment, rate, bpf);
  GST_OBJECT_UNLOCK (bpad);

  return buffer;
}
Example #13
static gboolean
gst_freeverb_get_unit_size (GstBaseTransform * base, GstCaps * caps,
                            gsize * size)
{
    GstAudioInfo info;

    g_assert (size);

    if (!gst_audio_info_from_caps (&info, caps))
        return FALSE;

    *size = GST_AUDIO_INFO_BPF (&info);

    GST_INFO_OBJECT (base, "unit size: %" G_GSIZE_FORMAT, *size);

    return TRUE;
}
Example #14
  virtual HRESULT RenderAudioSamples (bool preroll)
  {
    guint8 *ptr;
    gint seg;
    gint len;
    gint bpf;
    guint written, written_sum;
    HRESULT res;

    GST_LOG_OBJECT (m_ringbuffer->sink, "Writing audio samples (preroll: %d)",
        preroll);

    if (!gst_audio_ring_buffer_prepare_read (GST_AUDIO_RING_BUFFER_CAST
            (m_ringbuffer), &seg, &ptr, &len)) {
      GST_WARNING_OBJECT (m_ringbuffer->sink, "No segment available");
      return E_FAIL;
    }

    bpf =
        GST_AUDIO_INFO_BPF (&GST_AUDIO_RING_BUFFER_CAST (m_ringbuffer)->
        spec.info);
    len /= bpf;
    GST_LOG_OBJECT (m_ringbuffer->sink,
        "Write audio samples: %p size %d segment: %d", ptr, len, seg);

    written_sum = 0;
    do {
      res =
          m_ringbuffer->output->output->ScheduleAudioSamples (ptr, len,
          0, 0, &written);
      len -= written;
      ptr += written * bpf;
      written_sum += written;
    } while (len > 0 && res == S_OK);

    GST_LOG_OBJECT (m_ringbuffer->sink, "Wrote %u samples: 0x%08x", written_sum,
        res);

    gst_audio_ring_buffer_clear (GST_AUDIO_RING_BUFFER_CAST (m_ringbuffer),
        seg);
    gst_audio_ring_buffer_advance (GST_AUDIO_RING_BUFFER_CAST (m_ringbuffer),
        1);

    return res;
  }
Example #15
static gboolean
gst_ladspa_source_type_set_caps (GstBaseSrc * base, GstCaps * caps)
{
  GstLADSPASource *ladspa = GST_LADSPA_SOURCE (base);
  GstAudioInfo info;

  if (!gst_audio_info_from_caps (&info, caps)) {
    GST_ERROR_OBJECT (base, "received invalid caps");
    return FALSE;
  }

  GST_DEBUG_OBJECT (ladspa, "negotiated to caps %" GST_PTR_FORMAT, caps);

  ladspa->info = info;

  gst_base_src_set_blocksize (base,
      GST_AUDIO_INFO_BPF (&info) * ladspa->samples_per_buffer);

  return gst_ladspa_setup (&ladspa->ladspa, GST_AUDIO_INFO_RATE (&info));
}
Example #16
static GstBuffer *
gst_audio_aggregator_create_output_buffer (GstAudioAggregator * aagg,
    guint num_frames)
{
  GstAllocator *allocator;
  GstAllocationParams params;
  GstBuffer *outbuf;
  GstMapInfo outmap;

  gst_aggregator_get_allocator (GST_AGGREGATOR (aagg), &allocator, &params);

  outbuf = gst_buffer_new_allocate (allocator, num_frames *
      GST_AUDIO_INFO_BPF (&aagg->info), &params);

  if (allocator)
    gst_object_unref (allocator);

  gst_buffer_map (outbuf, &outmap, GST_MAP_WRITE);
  gst_audio_format_fill_silence (aagg->info.finfo, outmap.data, outmap.size);
  gst_buffer_unmap (outbuf, &outmap);

  return outbuf;
}
Example #17
static gboolean
gst_directsound_src_prepare (GstAudioSrc * asrc, GstAudioRingBufferSpec * spec)
{
  GstDirectSoundSrc *dsoundsrc;
  WAVEFORMATEX wfx;             /* Wave format structure */
  HRESULT hRes;                 /* Result for windows functions */
  DSCBUFFERDESC descSecondary;  /* Capturebuffer description */

  dsoundsrc = GST_DIRECTSOUND_SRC (asrc);

  GST_DEBUG_OBJECT (asrc, "preparing directsoundsrc");

  /* Define buffer */
  memset (&wfx, 0, sizeof (WAVEFORMATEX));
  wfx.wFormatTag = WAVE_FORMAT_PCM;
  wfx.nChannels = GST_AUDIO_INFO_CHANNELS (&spec->info);
  wfx.nSamplesPerSec = GST_AUDIO_INFO_RATE (&spec->info);
  wfx.wBitsPerSample = GST_AUDIO_INFO_BPF (&spec->info) * 8 / wfx.nChannels;
  wfx.nBlockAlign = GST_AUDIO_INFO_BPF (&spec->info);
  wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
  /* Ignored for WAVE_FORMAT_PCM. */
  wfx.cbSize = 0;

  if (wfx.wBitsPerSample != 16 && wfx.wBitsPerSample != 8)
    goto dodgy_width;

  /* Set the buffer size to two seconds.
     This should never be reached. */
  dsoundsrc->buffer_size = wfx.nAvgBytesPerSec * 2;

  GST_DEBUG_OBJECT (asrc, "Buffer size: %d", dsoundsrc->buffer_size);

  /* Init secondary buffer description */
  memset (&descSecondary, 0, sizeof (DSCBUFFERDESC));
  descSecondary.dwSize = sizeof (DSCBUFFERDESC);
  descSecondary.dwFlags = 0;
  descSecondary.dwReserved = 0;

  /* This is not the primary buffer, so we have to set the size */
  descSecondary.dwBufferBytes = dsoundsrc->buffer_size;
  descSecondary.lpwfxFormat = &wfx;

  /* Create buffer */
  hRes = IDirectSoundCapture_CreateCaptureBuffer (dsoundsrc->pDSC,
      &descSecondary, &dsoundsrc->pDSBSecondary, NULL);
  if (hRes != DS_OK)
    goto capture_buffer;

  dsoundsrc->bytes_per_sample = GST_AUDIO_INFO_BPF (&spec->info);

  GST_DEBUG ("latency time: %" G_GUINT64_FORMAT " - buffer time: %"
      G_GUINT64_FORMAT, spec->latency_time, spec->buffer_time);

  /* Buffer-time should always be more than 2*latency */
  if (spec->buffer_time < spec->latency_time * 2) {
    spec->buffer_time = spec->latency_time * 2;
    GST_WARNING ("buffer-time was less than latency");
  }

  /* Save the times */
  dsoundsrc->buffer_time = spec->buffer_time;
  dsoundsrc->latency_time = spec->latency_time;

  dsoundsrc->latency_size = (gint) wfx.nAvgBytesPerSec *
      dsoundsrc->latency_time / 1000000.0;

  spec->segsize = (guint) (((double) spec->buffer_time / 1000000.0) *
      wfx.nAvgBytesPerSec);

  /* just in case */
  if (spec->segsize < 1)
    spec->segsize = 1;

  spec->segtotal = GST_AUDIO_INFO_BPF (&spec->info) * 8 *
      (wfx.nAvgBytesPerSec / spec->segsize);

  GST_DEBUG_OBJECT (asrc,
      "bytes/sec: %lu, buffer size: %d, segsize: %d, segtotal: %d",
      wfx.nAvgBytesPerSec, dsoundsrc->buffer_size, spec->segsize,
      spec->segtotal);

  /* We have not read anything yet */
  dsoundsrc->current_circular_offset = 0;

  GST_DEBUG_OBJECT (asrc, "channels: %d, rate: %d, bytes_per_sample: %d"
      " WAVEFORMATEX.nSamplesPerSec: %ld, WAVEFORMATEX.wBitsPerSample: %d,"
      " WAVEFORMATEX.nBlockAlign: %d, WAVEFORMATEX.nAvgBytesPerSec: %ld",
      GST_AUDIO_INFO_CHANNELS (&spec->info), GST_AUDIO_INFO_RATE (&spec->info),
      GST_AUDIO_INFO_BPF (&spec->info), wfx.nSamplesPerSec, wfx.wBitsPerSample,
      wfx.nBlockAlign, wfx.nAvgBytesPerSec);

  return TRUE;

capture_buffer:
  {
    GST_ELEMENT_ERROR (dsoundsrc, RESOURCE, OPEN_READ,
        ("Unable to create capturebuffer"), (NULL));
    return FALSE;
  }
dodgy_width:
  {
    GST_ELEMENT_ERROR (dsoundsrc, RESOURCE, OPEN_READ,
        ("Unexpected width %d", wfx.wBitsPerSample), (NULL));
    return FALSE;
  }
}
Example #18
static gboolean
gst_audiomixer_aggregate_one_buffer (GstAudioAggregator * aagg,
    GstAudioAggregatorPad * aaggpad, GstBuffer * inbuf, guint in_offset,
    GstBuffer * outbuf, guint out_offset, guint num_frames)
{
  GstAudioMixerPad *pad = GST_AUDIO_MIXER_PAD (aaggpad);
  GstMapInfo inmap;
  GstMapInfo outmap;
  gint bpf;

  GST_OBJECT_LOCK (aagg);
  GST_OBJECT_LOCK (aaggpad);

  if (pad->mute || pad->volume < G_MINDOUBLE) {
    GST_DEBUG_OBJECT (pad, "Skipping muted pad");
    GST_OBJECT_UNLOCK (aaggpad);
    GST_OBJECT_UNLOCK (aagg);
    return FALSE;
  }

  bpf = GST_AUDIO_INFO_BPF (&aagg->info);

  gst_buffer_map (outbuf, &outmap, GST_MAP_READWRITE);
  gst_buffer_map (inbuf, &inmap, GST_MAP_READ);
  GST_LOG_OBJECT (pad, "mixing %u bytes at offset %u from offset %u",
      num_frames * bpf, out_offset * bpf, in_offset * bpf);

  /* further buffers, need to add them */
  if (pad->volume == 1.0) {
    switch (aagg->info.finfo->format) {
      case GST_AUDIO_FORMAT_U8:
        audiomixer_orc_add_u8 ((gpointer) (outmap.data + out_offset * bpf),
            (gpointer) (inmap.data + in_offset * bpf),
            num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_S8:
        audiomixer_orc_add_s8 ((gpointer) (outmap.data + out_offset * bpf),
            (gpointer) (inmap.data + in_offset * bpf),
            num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_U16:
        audiomixer_orc_add_u16 ((gpointer) (outmap.data + out_offset * bpf),
            (gpointer) (inmap.data + in_offset * bpf),
            num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_S16:
        audiomixer_orc_add_s16 ((gpointer) (outmap.data + out_offset * bpf),
            (gpointer) (inmap.data + in_offset * bpf),
            num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_U32:
        audiomixer_orc_add_u32 ((gpointer) (outmap.data + out_offset * bpf),
            (gpointer) (inmap.data + in_offset * bpf),
            num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_S32:
        audiomixer_orc_add_s32 ((gpointer) (outmap.data + out_offset * bpf),
            (gpointer) (inmap.data + in_offset * bpf),
            num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_F32:
        audiomixer_orc_add_f32 ((gpointer) (outmap.data + out_offset * bpf),
            (gpointer) (inmap.data + in_offset * bpf),
            num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_F64:
        audiomixer_orc_add_f64 ((gpointer) (outmap.data + out_offset * bpf),
            (gpointer) (inmap.data + in_offset * bpf),
            num_frames * aagg->info.channels);
        break;
      default:
        g_assert_not_reached ();
        break;
    }
  } else {
    switch (aagg->info.finfo->format) {
      case GST_AUDIO_FORMAT_U8:
        audiomixer_orc_add_volume_u8 ((gpointer) (outmap.data +
                out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
            pad->volume_i8, num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_S8:
        audiomixer_orc_add_volume_s8 ((gpointer) (outmap.data +
                out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
            pad->volume_i8, num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_U16:
        audiomixer_orc_add_volume_u16 ((gpointer) (outmap.data +
                out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
            pad->volume_i16, num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_S16:
        audiomixer_orc_add_volume_s16 ((gpointer) (outmap.data +
                out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
            pad->volume_i16, num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_U32:
        audiomixer_orc_add_volume_u32 ((gpointer) (outmap.data +
                out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
            pad->volume_i32, num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_S32:
        audiomixer_orc_add_volume_s32 ((gpointer) (outmap.data +
                out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
            pad->volume_i32, num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_F32:
        audiomixer_orc_add_volume_f32 ((gpointer) (outmap.data +
                out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
            pad->volume, num_frames * aagg->info.channels);
        break;
      case GST_AUDIO_FORMAT_F64:
        audiomixer_orc_add_volume_f64 ((gpointer) (outmap.data +
                out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
            pad->volume, num_frames * aagg->info.channels);
        break;
      default:
        g_assert_not_reached ();
        break;
    }
  }
  gst_buffer_unmap (inbuf, &inmap);
  gst_buffer_unmap (outbuf, &outmap);

  GST_OBJECT_UNLOCK (aaggpad);
  GST_OBJECT_UNLOCK (aagg);

  return TRUE;
}
Example #19
/* allocate a buffer and setup resources to process the audio samples of
 * the format as specified in @spec.
 *
 * We allocate N jack ports, one for each channel. If we are asked to
 * automatically make a connection with physical ports, we connect as many
 * ports as there are physical ports, leaving leftover ports unconnected.
 *
 * It is assumed that samplerate and number of channels are acceptable since our
 * getcaps method will always provide correct values. If unacceptable caps are
 * received for some reason, we fail here.
 */
static gboolean
gst_jack_ring_buffer_acquire (GstAudioRingBuffer * buf,
    GstAudioRingBufferSpec * spec)
{
  GstJackAudioSink *sink;
  GstJackRingBuffer *abuf;
  const char **ports;
  gint sample_rate, buffer_size;
  gint i, rate, bpf, channels, res;
  jack_client_t *client;

  sink = GST_JACK_AUDIO_SINK (GST_OBJECT_PARENT (buf));
  abuf = GST_JACK_RING_BUFFER_CAST (buf);

  GST_DEBUG_OBJECT (sink, "acquire");

  client = gst_jack_audio_client_get_client (sink->client);

  rate = GST_AUDIO_INFO_RATE (&spec->info);

  /* sample rate must be that of the server */
  sample_rate = jack_get_sample_rate (client);
  if (sample_rate != rate)
    goto wrong_samplerate;

  channels = GST_AUDIO_INFO_CHANNELS (&spec->info);
  bpf = GST_AUDIO_INFO_BPF (&spec->info);

  if (!gst_jack_audio_sink_allocate_channels (sink, channels))
    goto out_of_ports;

  buffer_size = jack_get_buffer_size (client);

  /* the segment size in bytes, this is large enough to hold a buffer of 32bit floats
   * for all channels  */
  spec->segsize = buffer_size * sizeof (gfloat) * channels;
  spec->latency_time = gst_util_uint64_scale (spec->segsize,
      (GST_SECOND / GST_USECOND), rate * bpf);
  /* segtotal based on buffer-time latency */
  spec->segtotal = spec->buffer_time / spec->latency_time;
  if (spec->segtotal < 2) {
    spec->segtotal = 2;
    spec->buffer_time = spec->latency_time * spec->segtotal;
  }

  GST_DEBUG_OBJECT (sink, "buffer time: %" G_GINT64_FORMAT " usec",
      spec->buffer_time);
  GST_DEBUG_OBJECT (sink, "latency time: %" G_GINT64_FORMAT " usec",
      spec->latency_time);
  GST_DEBUG_OBJECT (sink, "buffer_size %d, segsize %d, segtotal %d",
      buffer_size, spec->segsize, spec->segtotal);

  /* allocate the ringbuffer memory now */
  buf->size = spec->segtotal * spec->segsize;
  buf->memory = g_malloc0 (buf->size);

  if ((res = gst_jack_audio_client_set_active (sink->client, TRUE)))
    goto could_not_activate;

  /* if we need to automatically connect the ports, do so now. We must do this
   * after activating the client. */
  if (sink->connect == GST_JACK_CONNECT_AUTO
      || sink->connect == GST_JACK_CONNECT_AUTO_FORCED) {
    /* find all the physical input ports. A physical input port is a port
     * associated with a hardware device. Someone needs to connect to a physical
     * port in order to hear something. */
    ports = jack_get_ports (client, NULL, NULL,
        JackPortIsPhysical | JackPortIsInput);
    if (ports == NULL) {
      /* no ports? fine then we don't do anything except for posting a warning
       * message. */
      GST_ELEMENT_WARNING (sink, RESOURCE, NOT_FOUND, (NULL),
          ("No physical input ports found, leaving ports unconnected"));
      goto done;
    }

    for (i = 0; i < channels; i++) {
      /* stop when all input ports are exhausted */
      if (ports[i] == NULL) {
        /* post a warning that we could not connect all ports */
        GST_ELEMENT_WARNING (sink, RESOURCE, NOT_FOUND, (NULL),
            ("No more physical ports, leaving some ports unconnected"));
        break;
      }
      GST_DEBUG_OBJECT (sink, "try connecting to %s",
          jack_port_name (sink->ports[i]));
      /* connect the port to a physical port */
      res = jack_connect (client, jack_port_name (sink->ports[i]), ports[i]);
      if (res != 0 && res != EEXIST)
        goto cannot_connect;
    }
    free (ports);
  }
done:

  abuf->sample_rate = sample_rate;
  abuf->buffer_size = buffer_size;
  abuf->channels = channels;

  return TRUE;

  /* ERRORS */
wrong_samplerate:
  {
    GST_ELEMENT_ERROR (sink, RESOURCE, SETTINGS, (NULL),
        ("Wrong samplerate, server is running at %d and we received %d",
            sample_rate, rate));
    return FALSE;
  }
out_of_ports:
  {
    GST_ELEMENT_ERROR (sink, RESOURCE, SETTINGS, (NULL),
        ("Cannot allocate more Jack ports"));
    return FALSE;
  }
could_not_activate:
  {
    GST_ELEMENT_ERROR (sink, RESOURCE, SETTINGS, (NULL),
        ("Could not activate client (%d:%s)", res, g_strerror (res)));
    return FALSE;
  }
cannot_connect:
  {
    GST_ELEMENT_ERROR (sink, RESOURCE, SETTINGS, (NULL),
        ("Could not connect output ports to physical ports (%d:%s)",
            res, g_strerror (res)));
    free (ports);
    return FALSE;
  }
}
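
The latency/segment math in gst_jack_ring_buffer_acquire above can be checked with concrete numbers. A hedged sketch (hypothetical values; it assumes the sink negotiated F32 audio, so bpf equals sizeof (gfloat) * channels):

#include <gst/gst.h>

static void
jack_segment_sketch (void)
{
  gint buffer_size = 1024, channels = 2, rate = 48000;        /* hypothetical */
  gint bpf = sizeof (gfloat) * channels;                      /* 8 bytes per frame */
  guint segsize = buffer_size * sizeof (gfloat) * channels;   /* 8192 bytes */
  guint64 latency_time = gst_util_uint64_scale (segsize,
      GST_SECOND / GST_USECOND, rate * bpf);                  /* 21333 us */

  /* a 200 ms buffer_time then yields segtotal = 200000 / 21333 = 9 segments */
  g_assert (latency_time == 21333);
}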
Example #20
static gboolean
gst_alsasink_prepare (GstAudioSink * asink, GstAudioRingBufferSpec * spec)
{
  GstAlsaSink *alsa;
  gint err;

  alsa = GST_ALSA_SINK (asink);

  if (alsa->iec958) {
    snd_pcm_close (alsa->handle);
    alsa->handle = gst_alsa_open_iec958_pcm (GST_OBJECT (alsa), alsa->device);
    if (G_UNLIKELY (!alsa->handle)) {
      goto no_iec958;
    }
  }

  if (!alsasink_parse_spec (alsa, spec))
    goto spec_parse;

  CHECK (set_hwparams (alsa), hw_params_failed);
  CHECK (set_swparams (alsa), sw_params_failed);

  alsa->bpf = GST_AUDIO_INFO_BPF (&spec->info);
  spec->segsize = alsa->period_size * alsa->bpf;
  spec->segtotal = alsa->buffer_size / alsa->period_size;

  {
    snd_output_t *out_buf = NULL;
    char *msg = NULL;

    snd_output_buffer_open (&out_buf);
    snd_pcm_dump_hw_setup (alsa->handle, out_buf);
    snd_output_buffer_string (out_buf, &msg);
    GST_DEBUG_OBJECT (alsa, "Hardware setup: \n%s", msg);
    snd_output_close (out_buf);
    snd_output_buffer_open (&out_buf);
    snd_pcm_dump_sw_setup (alsa->handle, out_buf);
    snd_output_buffer_string (out_buf, &msg);
    GST_DEBUG_OBJECT (alsa, "Software setup: \n%s", msg);
    snd_output_close (out_buf);
  }

#ifdef SND_CHMAP_API_VERSION
  alsa_detect_channels_mapping (GST_OBJECT (alsa), alsa->handle, spec,
      alsa->channels, GST_AUDIO_BASE_SINK (alsa)->ringbuffer);
#endif /* SND_CHMAP_API_VERSION */

  return TRUE;

  /* ERRORS */
no_iec958:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, OPEN_WRITE, (NULL),
        ("Could not open IEC958 (SPDIF) device for playback"));
    return FALSE;
  }
spec_parse:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Error parsing spec"));
    return FALSE;
  }
hw_params_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Setting of hwparams failed: %s", snd_strerror (err)));
    return FALSE;
  }
sw_params_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Setting of swparams failed: %s", snd_strerror (err)));
    return FALSE;
  }
}
Example #21
static GstFlowReturn
gst_cutter_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstCutter *filter;
  GstMapInfo map;
  gint16 *in_data;
  gint bpf, rate;
  gsize in_size;
  guint num_samples;
  gdouble NCS = 0.0;            /* Normalized Cumulative Square of buffer */
  gdouble RMS = 0.0;            /* RMS of signal in buffer */
  gdouble NMS = 0.0;            /* Normalized Mean Square of buffer */
  GstBuffer *prebuf;            /* pointer to a prebuffer element */
  GstClockTime duration;

  filter = GST_CUTTER (parent);

  if (GST_AUDIO_INFO_FORMAT (&filter->info) == GST_AUDIO_FORMAT_UNKNOWN)
    goto not_negotiated;

  bpf = GST_AUDIO_INFO_BPF (&filter->info);
  rate = GST_AUDIO_INFO_RATE (&filter->info);

  gst_buffer_map (buf, &map, GST_MAP_READ);
  in_data = (gint16 *) map.data;
  in_size = map.size;

  GST_LOG_OBJECT (filter, "length of prerec buffer: %" GST_TIME_FORMAT,
      GST_TIME_ARGS (filter->pre_run_length));

  /* calculate mean square value on buffer */
  switch (GST_AUDIO_INFO_FORMAT (&filter->info)) {
    case GST_AUDIO_FORMAT_S16:
      num_samples = in_size / 2;
      gst_cutter_calculate_gint16 (in_data, num_samples, &NCS);
      NMS = NCS / num_samples;
      break;
    case GST_AUDIO_FORMAT_S8:
      num_samples = in_size;
      gst_cutter_calculate_gint8 ((gint8 *) in_data, num_samples, &NCS);
      NMS = NCS / num_samples;
      break;
    default:
      /* this shouldn't happen */
      g_warning ("no mean square function for format");
      break;
  }

  gst_buffer_unmap (buf, &map);

  filter->silent_prev = filter->silent;

  duration = gst_util_uint64_scale (in_size / bpf, GST_SECOND, rate);

  RMS = sqrt (NMS);
  /* if RMS is below the threshold, add the buffer length to the silent run
   * length count; if not, reset */
  GST_LOG_OBJECT (filter, "buffer stats: NMS %f, RMS %f, audio length %f", NMS,
      RMS, gst_guint64_to_gdouble (duration));

  if (RMS < filter->threshold_level)
    filter->silent_run_length += gst_guint64_to_gdouble (duration);
  else {
    filter->silent_run_length = 0 * GST_SECOND;
    filter->silent = FALSE;
  }

  if (filter->silent_run_length > filter->threshold_length)
    /* it has been silent long enough, flag it */
    filter->silent = TRUE;

  /* has the silent status changed? if so, send the right signal
   * and, if we went from silent -> not silent, flush the pre_record buffer */
  if (filter->silent != filter->silent_prev) {
    if (filter->silent) {
      GstMessage *m =
          gst_cutter_message_new (filter, FALSE, GST_BUFFER_TIMESTAMP (buf));
      GST_DEBUG_OBJECT (filter, "signaling CUT_STOP");
      gst_element_post_message (GST_ELEMENT (filter), m);
    } else {
      gint count = 0;
      GstMessage *m =
          gst_cutter_message_new (filter, TRUE, GST_BUFFER_TIMESTAMP (buf));

      GST_DEBUG_OBJECT (filter, "signaling CUT_START");
      gst_element_post_message (GST_ELEMENT (filter), m);
      /* first of all, flush current buffer */
      GST_DEBUG_OBJECT (filter, "flushing buffer of length %" GST_TIME_FORMAT,
          GST_TIME_ARGS (filter->pre_run_length));

      while (filter->pre_buffer) {
        prebuf = (g_list_first (filter->pre_buffer))->data;
        filter->pre_buffer = g_list_remove (filter->pre_buffer, prebuf);
        gst_pad_push (filter->srcpad, prebuf);
        ++count;
      }
      GST_DEBUG_OBJECT (filter, "flushed %d buffers", count);
      filter->pre_run_length = 0 * GST_SECOND;
    }
  }
  /* now check if we have to send the new buffer to the internal buffer cache
   * or to the srcpad */
  if (filter->silent) {
    filter->pre_buffer = g_list_append (filter->pre_buffer, buf);
    filter->pre_run_length += gst_guint64_to_gdouble (duration);

    while (filter->pre_run_length > filter->pre_length) {
      GstClockTime pduration;
      gsize psize;

      prebuf = (g_list_first (filter->pre_buffer))->data;
      g_assert (GST_IS_BUFFER (prebuf));

      psize = gst_buffer_get_size (prebuf);
      pduration = gst_util_uint64_scale (psize / bpf, GST_SECOND, rate);

      filter->pre_buffer = g_list_remove (filter->pre_buffer, prebuf);
      filter->pre_run_length -= gst_guint64_to_gdouble (pduration);

      /* only pass buffers if we don't leak */
      if (!filter->leaky)
        ret = gst_pad_push (filter->srcpad, prebuf);
      else
        gst_buffer_unref (prebuf);
    }
  } else
    ret = gst_pad_push (filter->srcpad, buf);

  return ret;

  /* ERRORS */
not_negotiated:
  {
    return GST_FLOW_NOT_NEGOTIATED;
  }
}
Example #22
static void
run_decoding_test (GstElement * mpg123audiodec, gchar const *filename)
{
  GstBus *bus;
  unsigned int num_input_buffers, num_decoded_buffers;
  gint expected_size;
  GstCaps *out_caps, *caps;
  GstAudioInfo audioinfo;
  GstElement *input_pipeline, *input_appsink;
  int i;
  GstBuffer *outbuffer;

  /* 440 Hz = frequency of sine wave in audio data
   * 44100 Hz = sample rate
   * (44100 / 2) Hz = Nyquist frequency */
  static double const expected_frequency_spot = 440.0 / (44100.0 / 2.0);

  fail_unless (gst_element_set_state (mpg123audiodec,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
      "could not set to playing");
  bus = gst_bus_new ();

  gst_element_set_bus (mpg123audiodec, bus);

  setup_input_pipeline (filename, &input_pipeline, &input_appsink);

  num_input_buffers = 0;
  while (TRUE) {
    GstSample *sample;
    GstBuffer *input_buffer;

    sample = gst_app_sink_pull_sample (GST_APP_SINK (input_appsink));
    if (sample == NULL)
      break;

    fail_unless (GST_IS_SAMPLE (sample));

    input_buffer = gst_sample_get_buffer (sample);
    fail_if (input_buffer == NULL);

    /* This is done to be on the safe side - docs say lifetime of the input buffer
     * depends *solely* on the sample */
    input_buffer = gst_buffer_copy (input_buffer);

    fail_unless_equals_int (gst_pad_push (mysrcpad, input_buffer), GST_FLOW_OK);

    ++num_input_buffers;

    gst_sample_unref (sample);
  }

  num_decoded_buffers = g_list_length (buffers);

  /* check number of decoded buffers */
  fail_unless_equals_int (num_decoded_buffers, num_input_buffers - 2);

  caps = gst_pad_get_current_caps (mysinkpad);
  GST_LOG ("output caps %" GST_PTR_FORMAT, caps);
  fail_unless (gst_audio_info_from_caps (&audioinfo, caps),
      "Getting audio info from caps failed");

  /* check caps */
  out_caps = gst_caps_new_simple ("audio/x-raw",
      "format", G_TYPE_STRING, "S32LE",
      "layout", G_TYPE_STRING, "interleaved",
      "rate", G_TYPE_INT, 44100, "channels", G_TYPE_INT, 1, NULL);

  fail_unless (gst_caps_is_equal_fixed (caps, out_caps), "Incorrect out caps");

  gst_caps_unref (out_caps);
  gst_caps_unref (caps);

  /* here, test if decoded data is a sine tone, and if the sine frequency is at the
   * right spot in the spectrum */
  for (i = 0; i < num_decoded_buffers; ++i) {
    outbuffer = GST_BUFFER (buffers->data);
    fail_if (outbuffer == NULL, "Invalid buffer retrieved");

    /* MPEG 1 layer 2 uses 1152 samples per frame */
    expected_size = 1152 * GST_AUDIO_INFO_BPF (&audioinfo);
    fail_unless_equals_int (gst_buffer_get_size (outbuffer), expected_size);

    check_main_frequency_spot_S32 (outbuffer, expected_frequency_spot);

    buffers = g_list_remove (buffers, outbuffer);
    gst_buffer_unref (outbuffer);
    outbuffer = NULL;
  }

  g_list_free (buffers);
  buffers = NULL;

  cleanup_input_pipeline (input_pipeline);
  gst_bus_set_flushing (bus, TRUE);
  gst_element_set_bus (mpg123audiodec, NULL);
  gst_object_unref (GST_OBJECT (bus));
}
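
For reference, the numbers this test checks follow directly from the caps it asserts: S32LE mono means GST_AUDIO_INFO_BPF (&audioinfo) is 4, so each 1152-sample MPEG-1 Layer II frame decodes to 1152 * 4 = 4608 bytes, and the 440 Hz tone should show up at the normalized spectrum position 440 / (44100 / 2), roughly 0.02.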
Example #23
static GstFlowReturn
gst_lv2_source_fill (GstBaseSrc * base, guint64 offset,
    guint length, GstBuffer * buffer)
{
  GstLV2Source *lv2 = (GstLV2Source *) base;
  GstLV2SourceClass *klass = (GstLV2SourceClass *) GST_BASE_SRC_GET_CLASS (lv2);
  GstLV2Class *lv2_class = &klass->lv2;
  GstLV2Group *lv2_group;
  GstLV2Port *lv2_port;
  GstClockTime next_time;
  gint64 next_sample, next_byte;
  guint bytes, samples;
  GstElementClass *eclass;
  GstMapInfo map;
  gint samplerate, bpf;
  guint j, k, l;
  gfloat *out = NULL, *cv = NULL, *mem;
  gfloat val;

  /* example for tagging generated data */
  if (!lv2->tags_pushed) {
    GstTagList *taglist;

    taglist = gst_tag_list_new (GST_TAG_DESCRIPTION, "lv2 wave", NULL);

    eclass = GST_ELEMENT_CLASS (parent_class);
    if (eclass->send_event)
      eclass->send_event (GST_ELEMENT (base), gst_event_new_tag (taglist));
    else
      gst_tag_list_unref (taglist);
    lv2->tags_pushed = TRUE;
  }

  if (lv2->eos_reached) {
    GST_INFO_OBJECT (lv2, "eos");
    return GST_FLOW_EOS;
  }

  samplerate = GST_AUDIO_INFO_RATE (&lv2->info);
  bpf = GST_AUDIO_INFO_BPF (&lv2->info);

  /* if no length was given, use our default length in samples otherwise convert
   * the length in bytes to samples. */
  if (length == -1)
    samples = lv2->samples_per_buffer;
  else
    samples = length / bpf;

  /* if no offset was given, use our next logical byte */
  if (offset == -1)
    offset = lv2->next_byte;

  /* now see if we are at the byteoffset we think we are */
  if (offset != lv2->next_byte) {
    GST_DEBUG_OBJECT (lv2, "seek to new offset %" G_GUINT64_FORMAT, offset);
    /* we have a discont in the expected sample offset, do a 'seek' */
    lv2->next_sample = offset / bpf;
    lv2->next_time =
        gst_util_uint64_scale_int (lv2->next_sample, GST_SECOND, samplerate);
    lv2->next_byte = offset;
  }

  /* check for eos */
  if (lv2->check_seek_stop &&
      (lv2->sample_stop > lv2->next_sample) &&
      (lv2->sample_stop < lv2->next_sample + samples)
      ) {
    /* calculate only partial buffer */
    lv2->generate_samples_per_buffer = lv2->sample_stop - lv2->next_sample;
    next_sample = lv2->sample_stop;
    lv2->eos_reached = TRUE;

    GST_INFO_OBJECT (lv2, "eos reached");
  } else {
    /* calculate full buffer */
    lv2->generate_samples_per_buffer = samples;
    next_sample = lv2->next_sample + (lv2->reverse ? (-samples) : samples);
  }

  bytes = lv2->generate_samples_per_buffer * bpf;

  next_byte = lv2->next_byte + (lv2->reverse ? (-bytes) : bytes);
  next_time = gst_util_uint64_scale_int (next_sample, GST_SECOND, samplerate);

  GST_LOG_OBJECT (lv2, "samplerate %d", samplerate);
  GST_LOG_OBJECT (lv2,
      "next_sample %" G_GINT64_FORMAT ", ts %" GST_TIME_FORMAT, next_sample,
      GST_TIME_ARGS (next_time));

  gst_buffer_set_size (buffer, bytes);

  GST_BUFFER_OFFSET (buffer) = lv2->next_sample;
  GST_BUFFER_OFFSET_END (buffer) = next_sample;
  if (!lv2->reverse) {
    GST_BUFFER_TIMESTAMP (buffer) = lv2->timestamp_offset + lv2->next_time;
    GST_BUFFER_DURATION (buffer) = next_time - lv2->next_time;
  } else {
    GST_BUFFER_TIMESTAMP (buffer) = lv2->timestamp_offset + next_time;
    GST_BUFFER_DURATION (buffer) = lv2->next_time - next_time;
  }

  gst_object_sync_values (GST_OBJECT (lv2), GST_BUFFER_TIMESTAMP (buffer));

  lv2->next_time = next_time;
  lv2->next_sample = next_sample;
  lv2->next_byte = next_byte;

  GST_LOG_OBJECT (lv2, "generating %u samples at ts %" GST_TIME_FORMAT,
      samples, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));

  gst_buffer_map (buffer, &map, GST_MAP_WRITE);

  /* multi channel outputs */
  lv2_group = &lv2_class->out_group;
  if (lv2_group->ports->len > 1) {
    out = g_new0 (gfloat, samples * lv2_group->ports->len);
    for (j = 0; j < lv2_group->ports->len; ++j) {
      lv2_port = &g_array_index (lv2_group->ports, GstLV2Port, j);
      lilv_instance_connect_port (lv2->lv2.instance, lv2_port->index,
          out + (j * samples));
      GST_LOG_OBJECT (lv2, "connected port %d/%d", j, lv2_group->ports->len);
    }
  } else {
    lv2_port = &g_array_index (lv2_group->ports, GstLV2Port, 0);
    lilv_instance_connect_port (lv2->lv2.instance, lv2_port->index,
        (gfloat *) map.data);
    GST_LOG_OBJECT (lv2, "connected port 0");
  }

  /* cv ports */
  cv = g_new (gfloat, samples * lv2_class->num_cv_in);
  for (j = k = 0; j < lv2_class->control_in_ports->len; j++) {
    lv2_port = &g_array_index (lv2_class->control_in_ports, GstLV2Port, j);
    if (lv2_port->type != GST_LV2_PORT_CV)
      continue;

    mem = cv + (k * samples);
    val = lv2->lv2.ports.control.in[j];
    /* FIXME: use gst_control_binding_get_value_array */
    for (l = 0; l < samples; l++)
      mem[l] = val;
    lilv_instance_connect_port (lv2->lv2.instance, lv2_port->index, mem);
    k++;
  }

  lilv_instance_run (lv2->lv2.instance, samples);

  if (lv2_group->ports->len > 1) {
    gst_lv2_source_interleave_data (lv2_group->ports->len,
        (gfloat *) map.data, samples, out);
    g_free (out);
  }

  g_free (cv);

  gst_buffer_unmap (buffer, &map);

  return GST_FLOW_OK;
}
Example #24
static gboolean
gst_directsound_src_prepare (GstAudioSrc * asrc, GstAudioRingBufferSpec * spec)
{
  GstDirectSoundSrc *dsoundsrc;
  WAVEFORMATEX wfx;             /* Wave format structure */
  HRESULT hRes;                 /* Result for windows functions */
  DSCBUFFERDESC descSecondary;  /* Capturebuffer description */

  dsoundsrc = GST_DIRECTSOUND_SRC (asrc);

  GST_DEBUG_OBJECT (asrc, "preparing directsoundsrc");

  /* Define buffer */
  memset (&wfx, 0, sizeof (WAVEFORMATEX));
  wfx.wFormatTag = WAVE_FORMAT_PCM;
  wfx.nChannels = GST_AUDIO_INFO_CHANNELS (&spec->info);
  wfx.nSamplesPerSec = GST_AUDIO_INFO_RATE (&spec->info);
  wfx.wBitsPerSample = GST_AUDIO_INFO_BPF (&spec->info) * 8 / wfx.nChannels;
  wfx.nBlockAlign = GST_AUDIO_INFO_BPF (&spec->info);
  wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
  /* Ignored for WAVE_FORMAT_PCM. */
  wfx.cbSize = 0;

  if (wfx.wBitsPerSample != 16 && wfx.wBitsPerSample != 8)
    goto dodgy_width;

  GST_INFO_OBJECT (asrc, "latency time: %" G_GUINT64_FORMAT " - buffer time: %"
      G_GUINT64_FORMAT, spec->latency_time, spec->buffer_time);

  /* Buffer-time should always be >= 2*latency */
  if (spec->buffer_time < spec->latency_time * 2) {
    spec->buffer_time = spec->latency_time * 2;
    GST_WARNING ("buffer-time was less than 2*latency-time, clamping");
  }

  /* Set the buffer size from our configured buffer time (in microsecs) */
  dsoundsrc->buffer_size =
      gst_util_uint64_scale_int (spec->buffer_time, wfx.nAvgBytesPerSec,
      GST_SECOND / GST_USECOND);

  GST_INFO_OBJECT (asrc, "Buffer size: %d", dsoundsrc->buffer_size);

  spec->segsize =
      gst_util_uint64_scale (spec->latency_time, wfx.nAvgBytesPerSec,
      GST_SECOND / GST_USECOND);

  /* Sanitize segsize */
  if (spec->segsize < GST_AUDIO_INFO_BPF (&spec->info))
    spec->segsize = GST_AUDIO_INFO_BPF (&spec->info);
  else if (spec->segsize % GST_AUDIO_INFO_BPF (&spec->info) != 0)
    spec->segsize =
        ((spec->segsize + GST_AUDIO_INFO_BPF (&spec->info) -
            1) / GST_AUDIO_INFO_BPF (&spec->info)) *
        GST_AUDIO_INFO_BPF (&spec->info);
  spec->segtotal = dsoundsrc->buffer_size / spec->segsize;
  /* The device usually takes time = 1-2 segments to start producing buffers */
  spec->seglatency = spec->segtotal + 2;

  /* Fetch and set the actual latency time that will be used */
  dsoundsrc->latency_time =
      gst_util_uint64_scale (spec->segsize, GST_SECOND / GST_USECOND,
      GST_AUDIO_INFO_BPF (&spec->info) * GST_AUDIO_INFO_RATE (&spec->info));

  GST_INFO_OBJECT (asrc, "actual latency time: %" G_GUINT64_FORMAT,
      spec->latency_time);

  /* Init secondary buffer description */
  memset (&descSecondary, 0, sizeof (DSCBUFFERDESC));
  descSecondary.dwSize = sizeof (DSCBUFFERDESC);
  descSecondary.dwFlags = 0;
  descSecondary.dwReserved = 0;

  /* This is not the primary buffer, so we have to set the size */
  descSecondary.dwBufferBytes = dsoundsrc->buffer_size;
  descSecondary.lpwfxFormat = &wfx;

  /* Create buffer */
  hRes = IDirectSoundCapture_CreateCaptureBuffer (dsoundsrc->pDSC,
      &descSecondary, &dsoundsrc->pDSBSecondary, NULL);
  if (hRes != DS_OK)
    goto capture_buffer;

  dsoundsrc->bytes_per_sample = GST_AUDIO_INFO_BPF (&spec->info);

  GST_INFO_OBJECT (asrc,
      "bytes/sec: %lu, buffer size: %d, segsize: %d, segtotal: %d",
      wfx.nAvgBytesPerSec, dsoundsrc->buffer_size, spec->segsize,
      spec->segtotal);

  /* We have not read anything yet */
  dsoundsrc->current_circular_offset = 0;

  GST_INFO_OBJECT (asrc, "channels: %d, rate: %d, bytes_per_sample: %d"
      " WAVEFORMATEX.nSamplesPerSec: %ld, WAVEFORMATEX.wBitsPerSample: %d,"
      " WAVEFORMATEX.nBlockAlign: %d, WAVEFORMATEX.nAvgBytesPerSec: %ld",
      GST_AUDIO_INFO_CHANNELS (&spec->info), GST_AUDIO_INFO_RATE (&spec->info),
      GST_AUDIO_INFO_BPF (&spec->info), wfx.nSamplesPerSec, wfx.wBitsPerSample,
      wfx.nBlockAlign, wfx.nAvgBytesPerSec);

  return TRUE;

capture_buffer:
  {
    GST_ELEMENT_ERROR (dsoundsrc, RESOURCE, OPEN_READ,
        ("Unable to create capturebuffer"), (NULL));
    return FALSE;
  }
dodgy_width:
  {
    GST_ELEMENT_ERROR (dsoundsrc, RESOURCE, OPEN_READ,
        ("Unexpected width %d", wfx.wBitsPerSample), (NULL));
    return FALSE;
  }
}
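
To make the sanitizing step above concrete (hypothetical numbers): with 44100 Hz 16-bit stereo audio, nAvgBytesPerSec is 176400, so a 10 ms latency_time gives segsize = 1764 bytes, which is already a multiple of the 4-byte frame size; a 500 ms buffer_time gives an 88200-byte capture buffer, hence segtotal = 50 and seglatency = 52, and the recomputed latency_time is 1764 * 1000000 / (4 * 44100) = 10000 microseconds, i.e. unchanged.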
Example #25
static gboolean
gst_osx_audio_ring_buffer_acquire (GstAudioRingBuffer * buf,
    GstAudioRingBufferSpec * spec)
{
  gboolean ret = FALSE, is_passthrough = FALSE;
  GstOsxAudioRingBuffer *osxbuf;
  AudioStreamBasicDescription format;

  osxbuf = GST_OSX_AUDIO_RING_BUFFER (buf);

  if (RINGBUFFER_IS_SPDIF (spec->type)) {
    format.mFormatID = kAudioFormat60958AC3;
    format.mSampleRate = (double) GST_AUDIO_INFO_RATE (&spec->info);
    format.mChannelsPerFrame = 2;
    format.mFormatFlags = kAudioFormatFlagIsSignedInteger |
        kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonMixable;
    format.mBytesPerFrame = 0;
    format.mBitsPerChannel = 16;
    format.mBytesPerPacket = 6144;
    format.mFramesPerPacket = 1536;
    format.mReserved = 0;
    spec->segsize = 6144;
    spec->segtotal = 10;
    is_passthrough = TRUE;
  } else {
    int width, depth;
    /* Fill out the audio description we're going to be using */
    format.mFormatID = kAudioFormatLinearPCM;
    format.mSampleRate = (double) GST_AUDIO_INFO_RATE (&spec->info);
    format.mChannelsPerFrame = GST_AUDIO_INFO_CHANNELS (&spec->info);
    if (GST_AUDIO_INFO_IS_FLOAT (&spec->info)) {
      format.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
      width = depth = GST_AUDIO_INFO_WIDTH (&spec->info);
    } else {
      format.mFormatFlags = kAudioFormatFlagIsSignedInteger;
      width = GST_AUDIO_INFO_WIDTH (&spec->info);
      depth = GST_AUDIO_INFO_DEPTH (&spec->info);
      if (width == depth) {
        format.mFormatFlags |= kAudioFormatFlagIsPacked;
      } else {
        format.mFormatFlags |= kAudioFormatFlagIsAlignedHigh;
      }
    }

    if (GST_AUDIO_INFO_IS_BIG_ENDIAN (&spec->info)) {
      format.mFormatFlags |= kAudioFormatFlagIsBigEndian;
    }

    format.mBytesPerFrame = GST_AUDIO_INFO_BPF (&spec->info);
    format.mBitsPerChannel = depth;
    format.mBytesPerPacket = GST_AUDIO_INFO_BPF (&spec->info);
    format.mFramesPerPacket = 1;
    format.mReserved = 0;
    spec->segsize =
        (spec->latency_time * GST_AUDIO_INFO_RATE (&spec->info) /
        G_USEC_PER_SEC) * GST_AUDIO_INFO_BPF (&spec->info);
    spec->segtotal = spec->buffer_time / spec->latency_time;
    is_passthrough = FALSE;
  }

  GST_DEBUG_OBJECT (osxbuf, "Format: " CORE_AUDIO_FORMAT,
      CORE_AUDIO_FORMAT_ARGS (format));

  /* gst_audio_ring_buffer_set_channel_positions is not called
   * since the AUs perform channel reordering themselves.
   * (see gst_core_audio_set_channel_layout) */

  buf->size = spec->segtotal * spec->segsize;
  buf->memory = g_malloc0 (buf->size);

  ret = gst_core_audio_initialize (osxbuf->core_audio, format, spec->caps,
      is_passthrough);

  if (!ret) {
    g_free (buf->memory);
    buf->memory = NULL;
    buf->size = 0;
  }

  osxbuf->segoffset = 0;

  return ret;
}
Example #26
static gboolean
gst_alsasrc_prepare (GstAudioSrc * asrc, GstAudioRingBufferSpec * spec)
{
  GstAlsaSrc *alsa;
  gint err;

  alsa = GST_ALSA_SRC (asrc);

  if (!alsasrc_parse_spec (alsa, spec))
    goto spec_parse;

  CHECK (snd_pcm_nonblock (alsa->handle, 0), non_block);

  CHECK (set_hwparams (alsa), hw_params_failed);
  CHECK (set_swparams (alsa), sw_params_failed);
  CHECK (snd_pcm_prepare (alsa->handle), prepare_failed);

  alsa->bpf = GST_AUDIO_INFO_BPF (&spec->info);
  spec->segsize = alsa->period_size * alsa->bpf;
  spec->segtotal = alsa->buffer_size / alsa->period_size;

  {
    snd_output_t *out_buf = NULL;
    char *msg = NULL;

    snd_output_buffer_open (&out_buf);
    snd_pcm_dump_hw_setup (alsa->handle, out_buf);
    snd_output_buffer_string (out_buf, &msg);
    GST_DEBUG_OBJECT (alsa, "Hardware setup: \n%s", msg);
    snd_output_close (out_buf);
    snd_output_buffer_open (&out_buf);
    snd_pcm_dump_sw_setup (alsa->handle, out_buf);
    snd_output_buffer_string (out_buf, &msg);
    GST_DEBUG_OBJECT (alsa, "Software setup: \n%s", msg);
    snd_output_close (out_buf);
  }

#ifdef SND_CHMAP_API_VERSION
  alsa_detect_channels_mapping (GST_OBJECT (alsa), alsa->handle, spec,
      alsa->channels, GST_AUDIO_BASE_SRC (alsa)->ringbuffer);
#endif /* SND_CHMAP_API_VERSION */

  return TRUE;

  /* ERRORS */
spec_parse:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Error parsing spec"));
    return FALSE;
  }
non_block:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Could not set device to blocking: %s", snd_strerror (err)));
    return FALSE;
  }
hw_params_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Setting of hwparams failed: %s", snd_strerror (err)));
    return FALSE;
  }
sw_params_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Setting of swparams failed: %s", snd_strerror (err)));
    return FALSE;
  }
prepare_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Prepare failed: %s", snd_strerror (err)));
    return FALSE;
  }
}
Example #27
static GstFlowReturn
gst_videoframe_audiolevel_asink_chain (GstPad * pad, GstObject * parent,
    GstBuffer * inbuf)
{
  GstClockTime timestamp, cur_time;
  GstVideoFrameAudioLevel *self = GST_VIDEOFRAME_AUDIOLEVEL (parent);
  GstBuffer *buf;
  gsize inbuf_size;
  guint64 start_offset, end_offset;
  GstClockTime running_time;
  gint rate, bpf;
  gboolean discont = FALSE;

  timestamp = GST_BUFFER_TIMESTAMP (inbuf);
  running_time =
      gst_segment_to_running_time (&self->asegment, GST_FORMAT_TIME, timestamp);

  rate = GST_AUDIO_INFO_RATE (&self->ainfo);
  bpf = GST_AUDIO_INFO_BPF (&self->ainfo);
  start_offset = gst_util_uint64_scale (timestamp, rate, GST_SECOND);
  inbuf_size = gst_buffer_get_size (inbuf);
  end_offset = start_offset + inbuf_size / bpf;

  g_mutex_lock (&self->mutex);

  if (GST_BUFFER_IS_DISCONT (inbuf)
      || GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_RESYNC)
      || self->first_time == GST_CLOCK_TIME_NONE) {
    discont = TRUE;
  } else {
    guint64 diff, max_sample_diff;

    /* Check discont, based on audiobasesink */
    if (start_offset <= self->next_offset)
      diff = self->next_offset - start_offset;
    else
      diff = start_offset - self->next_offset;

    max_sample_diff =
        gst_util_uint64_scale_int (self->alignment_threshold, rate, GST_SECOND);

    /* Discont! */
    if (G_UNLIKELY (diff >= max_sample_diff)) {
      if (self->discont_wait > 0) {
        if (self->discont_time == GST_CLOCK_TIME_NONE) {
          self->discont_time = timestamp;
        } else if (timestamp - self->discont_time >= self->discont_wait) {
          discont = TRUE;
          self->discont_time = GST_CLOCK_TIME_NONE;
        }
      } else {
        discont = TRUE;
      }
    } else if (G_UNLIKELY (self->discont_time != GST_CLOCK_TIME_NONE)) {
      /* we have had a discont, but are now back on track! */
      self->discont_time = GST_CLOCK_TIME_NONE;
    }
  }

  if (discont) {
    /* Have discont, need resync */
    if (self->next_offset != -1)
      GST_INFO_OBJECT (pad, "Have discont. Expected %"
          G_GUINT64_FORMAT ", got %" G_GUINT64_FORMAT,
          self->next_offset, start_offset);
    self->total_frames = 0;
    self->first_time = running_time;
    self->next_offset = end_offset;
  } else {
    self->next_offset += inbuf_size / bpf;
  }

  gst_adapter_push (self->adapter, gst_buffer_ref (inbuf));

  GST_DEBUG_OBJECT (self, "Queue length %i",
      g_queue_get_length (&self->vtimeq));

  while (TRUE) {
    GstClockTime *vt0, *vt1;
    GstClockTime vtemp;
    GstMessage *msg;
    gsize bytes, available_bytes;

    vtemp = GST_CLOCK_TIME_NONE;

    while (!(g_queue_get_length (&self->vtimeq) >= 2 || self->video_eos_flag
            || self->audio_flush_flag || self->shutdown_flag))
      g_cond_wait (&self->cond, &self->mutex);

    if (self->audio_flush_flag || self->shutdown_flag) {
      g_mutex_unlock (&self->mutex);
      gst_buffer_unref (inbuf);
      return GST_FLOW_FLUSHING;
    } else if (self->video_eos_flag) {
      GST_DEBUG_OBJECT (self, "Video EOS flag alert");
      /* nothing to do here if queue is empty */
      if (g_queue_get_length (&self->vtimeq) == 0)
        break;

      if (g_queue_get_length (&self->vtimeq) < 2) {
        vtemp = self->vsegment.position;
      } else if (self->vsegment.position == GST_CLOCK_TIME_NONE) {
        /* g_queue_get_length is surely >= 2 at this point
         * so the adapter isn't empty */
        buf =
            gst_adapter_take_buffer (self->adapter,
            gst_adapter_available (self->adapter));
        if (buf != NULL) {
          GstMessage *msg;
          msg = update_rms_from_buffer (self, buf);
          g_mutex_unlock (&self->mutex);
          gst_element_post_message (GST_ELEMENT (self), msg);
          gst_buffer_unref (buf);
          g_mutex_lock (&self->mutex);  /* we unlock again later */
        }
        break;
      }
    } else if (g_queue_get_length (&self->vtimeq) < 2) {
      continue;
    }

    vt0 = g_queue_pop_head (&self->vtimeq);
    if (vtemp == GST_CLOCK_TIME_NONE)
      vt1 = g_queue_peek_head (&self->vtimeq);
    else
      vt1 = &vtemp;

    cur_time =
        self->first_time + gst_util_uint64_scale (self->total_frames,
        GST_SECOND, rate);
    GST_DEBUG_OBJECT (self,
        "Processing: current time is %" GST_TIME_FORMAT,
        GST_TIME_ARGS (cur_time));
    GST_DEBUG_OBJECT (self, "Total frames is %i with a rate of %d",
        self->total_frames, rate);
    GST_DEBUG_OBJECT (self, "Start time is %" GST_TIME_FORMAT,
        GST_TIME_ARGS (self->first_time));
    GST_DEBUG_OBJECT (self, "Time on top is %" GST_TIME_FORMAT,
        GST_TIME_ARGS (*vt0));

    if (cur_time < *vt0) {
      guint num_frames =
          gst_util_uint64_scale (*vt0 - cur_time, rate, GST_SECOND);
      bytes = num_frames * GST_AUDIO_INFO_BPF (&self->ainfo);
      available_bytes = gst_adapter_available (self->adapter);
      if (available_bytes == 0) {
        g_queue_push_head (&self->vtimeq, vt0);
        break;
      }
      if (bytes == 0) {
        cur_time = *vt0;
      } else {
        GST_DEBUG_OBJECT (self,
            "Flushed %" G_GSIZE_FORMAT " out of %" G_GSIZE_FORMAT " bytes",
            bytes, available_bytes);
        gst_adapter_flush (self->adapter, MIN (bytes, available_bytes));
        self->total_frames += num_frames;
        if (available_bytes <= bytes) {
          g_queue_push_head (&self->vtimeq, vt0);
          break;
        }
        cur_time =
            self->first_time + gst_util_uint64_scale (self->total_frames,
            GST_SECOND, rate);
      }
    }
    if (*vt1 > cur_time) {
      bytes =
          GST_AUDIO_INFO_BPF (&self->ainfo) * gst_util_uint64_scale (*vt1 -
          cur_time, rate, GST_SECOND);
    } else {
      bytes = 0;                /* We just need to discard vt0 */
    }
    available_bytes = gst_adapter_available (self->adapter);
    GST_DEBUG_OBJECT (self,
        "Adapter contains %" G_GSIZE_FORMAT " out of %" G_GSIZE_FORMAT " bytes",
        available_bytes, bytes);

    if (available_bytes < bytes) {
      g_queue_push_head (&self->vtimeq, vt0);
      goto done;
    }

    if (bytes > 0) {
      buf = gst_adapter_take_buffer (self->adapter, bytes);
      g_assert (buf != NULL);
    } else {
      /* Just an empty buffer */
      buf = gst_buffer_new ();
    }
    msg = update_rms_from_buffer (self, buf);
    g_mutex_unlock (&self->mutex);
    gst_element_post_message (GST_ELEMENT (self), msg);
    g_mutex_lock (&self->mutex);

    gst_buffer_unref (buf);
    g_free (vt0);
    if (available_bytes == bytes)
      break;
  }
done:
  g_mutex_unlock (&self->mutex);
  return gst_pad_push (self->asrcpad, inbuf);
}
Example #28
/**
 * gst_audio_info_convert:
 * @info: a #GstAudioInfo
 * @src_fmt: #GstFormat of the @src_val
 * @src_val: value to convert
 * @dest_fmt: #GstFormat of the @dest_val
 * @dest_val: pointer to destination value
 *
 * Converts among various #GstFormat types.  This function handles
 * GST_FORMAT_BYTES, GST_FORMAT_TIME, and GST_FORMAT_DEFAULT.  For
 * raw audio, GST_FORMAT_DEFAULT corresponds to audio frames.  This
 * function can be used to handle pad queries of the type GST_QUERY_CONVERT.
 *
 * Returns: TRUE if the conversion was successful.
 */
gboolean
gst_audio_info_convert (const GstAudioInfo * info,
    GstFormat src_fmt, gint64 src_val, GstFormat dest_fmt, gint64 * dest_val)
{
  gboolean res = TRUE;
  gint bpf, rate;

  GST_DEBUG ("converting value %" G_GINT64_FORMAT " from %s (%d) to %s (%d)",
      src_val, gst_format_get_name (src_fmt), src_fmt,
      gst_format_get_name (dest_fmt), dest_fmt);

  if (src_fmt == dest_fmt || src_val == -1) {
    *dest_val = src_val;
    goto done;
  }

  /* get important info */
  bpf = GST_AUDIO_INFO_BPF (info);
  rate = GST_AUDIO_INFO_RATE (info);

  if (bpf == 0 || rate == 0) {
    GST_DEBUG ("no rate or bpf configured");
    res = FALSE;
    goto done;
  }

  switch (src_fmt) {
    case GST_FORMAT_BYTES:
      switch (dest_fmt) {
        case GST_FORMAT_TIME:
          *dest_val = GST_FRAMES_TO_CLOCK_TIME (src_val / bpf, rate);
          break;
        case GST_FORMAT_DEFAULT:
          *dest_val = src_val / bpf;
          break;
        default:
          res = FALSE;
          break;
      }
      break;
    case GST_FORMAT_DEFAULT:
      switch (dest_fmt) {
        case GST_FORMAT_TIME:
          *dest_val = GST_FRAMES_TO_CLOCK_TIME (src_val, rate);
          break;
        case GST_FORMAT_BYTES:
          *dest_val = src_val * bpf;
          break;
        default:
          res = FALSE;
          break;
      }
      break;
    case GST_FORMAT_TIME:
      switch (dest_fmt) {
        case GST_FORMAT_DEFAULT:
          *dest_val = GST_CLOCK_TIME_TO_FRAMES (src_val, rate);
          break;
        case GST_FORMAT_BYTES:
          *dest_val = GST_CLOCK_TIME_TO_FRAMES (src_val, rate);
          *dest_val *= bpf;
          break;
        default:
          res = FALSE;
          break;
      }
      break;
    default:
      res = FALSE;
      break;
  }
done:

  GST_DEBUG ("ret=%d result %" G_GINT64_FORMAT, res, res ? *dest_val : -1);

  return res;
}
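The documentation above notes that this helper is suited to GST_QUERY_CONVERT pad queries. Below is a minimal sketch, not taken from any of the examples, of how such a query handler might call it; handle_convert_query is a hypothetical name, and the GstAudioInfo is assumed to already hold the negotiated caps.

/* Hypothetical helper: answer a GST_QUERY_CONVERT query using
 * gst_audio_info_convert().  'info' is assumed to have been filled in
 * from the negotiated caps (e.g. via gst_audio_info_from_caps()). */
static gboolean
handle_convert_query (const GstAudioInfo * info, GstQuery * query)
{
  GstFormat src_fmt, dest_fmt;
  gint64 src_val, dest_val;

  gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);

  if (!gst_audio_info_convert (info, src_fmt, src_val, dest_fmt, &dest_val))
    return FALSE;

  gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
  return TRUE;
}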
Example #29
static GstFlowReturn
gst_ladspa_source_type_fill (GstBaseSrc * base, guint64 offset,
    guint length, GstBuffer * buffer)
{
  GstLADSPASource *ladspa = GST_LADSPA_SOURCE (base);
  GstClockTime next_time;
  gint64 next_sample, next_byte;
  gint bytes, samples;
  GstElementClass *eclass;
  GstMapInfo map;
  gint samplerate, bpf;

  /* example for tagging generated data */
  if (!ladspa->tags_pushed) {
    GstTagList *taglist;

    taglist = gst_tag_list_new (GST_TAG_DESCRIPTION, "ladspa wave", NULL);

    eclass = GST_ELEMENT_CLASS (gst_ladspa_source_type_parent_class);
    if (eclass->send_event)
      eclass->send_event (GST_ELEMENT (base), gst_event_new_tag (taglist));
    else
      gst_tag_list_unref (taglist);
    ladspa->tags_pushed = TRUE;
  }

  if (ladspa->eos_reached) {
    GST_INFO_OBJECT (ladspa, "eos");
    return GST_FLOW_EOS;
  }

  samplerate = GST_AUDIO_INFO_RATE (&ladspa->info);
  bpf = GST_AUDIO_INFO_BPF (&ladspa->info);

  /* if no length was given, use our default length in samples otherwise convert
   * the length in bytes to samples. */
  if (length == -1)
    samples = ladspa->samples_per_buffer;
  else
    samples = length / bpf;

  /* if no offset was given, use our next logical byte */
  if (offset == -1)
    offset = ladspa->next_byte;

  /* now see if we are at the byteoffset we think we are */
  if (offset != ladspa->next_byte) {
    GST_DEBUG_OBJECT (ladspa, "seek to new offset %" G_GUINT64_FORMAT, offset);
    /* we have a discont in the expected sample offset, do a 'seek' */
    ladspa->next_sample = offset / bpf;
    ladspa->next_time =
        gst_util_uint64_scale_int (ladspa->next_sample, GST_SECOND, samplerate);
    ladspa->next_byte = offset;
  }

  /* check for eos */
  if (ladspa->check_seek_stop &&
      (ladspa->sample_stop > ladspa->next_sample) &&
      (ladspa->sample_stop < ladspa->next_sample + samples)
      ) {
    /* calculate only partial buffer */
    ladspa->generate_samples_per_buffer =
        ladspa->sample_stop - ladspa->next_sample;
    next_sample = ladspa->sample_stop;
    ladspa->eos_reached = TRUE;
  } else {
    /* calculate full buffer */
    ladspa->generate_samples_per_buffer = samples;
    next_sample =
        ladspa->next_sample + (ladspa->reverse ? (-samples) : samples);
  }

  bytes = ladspa->generate_samples_per_buffer * bpf;

  next_byte = ladspa->next_byte + (ladspa->reverse ? (-bytes) : bytes);
  next_time = gst_util_uint64_scale_int (next_sample, GST_SECOND, samplerate);

  GST_LOG_OBJECT (ladspa, "samplerate %d", samplerate);
  GST_LOG_OBJECT (ladspa,
      "next_sample %" G_GINT64_FORMAT ", ts %" GST_TIME_FORMAT, next_sample,
      GST_TIME_ARGS (next_time));

  gst_buffer_set_size (buffer, bytes);

  GST_BUFFER_OFFSET (buffer) = ladspa->next_sample;
  GST_BUFFER_OFFSET_END (buffer) = next_sample;
  if (!ladspa->reverse) {
    GST_BUFFER_TIMESTAMP (buffer) =
        ladspa->timestamp_offset + ladspa->next_time;
    GST_BUFFER_DURATION (buffer) = next_time - ladspa->next_time;
  } else {
    GST_BUFFER_TIMESTAMP (buffer) = ladspa->timestamp_offset + next_time;
    GST_BUFFER_DURATION (buffer) = ladspa->next_time - next_time;
  }

  gst_object_sync_values (GST_OBJECT (ladspa), GST_BUFFER_TIMESTAMP (buffer));

  ladspa->next_time = next_time;
  ladspa->next_sample = next_sample;
  ladspa->next_byte = next_byte;

  GST_LOG_OBJECT (ladspa, "generating %u samples at ts %" GST_TIME_FORMAT,
      ladspa->generate_samples_per_buffer,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));

  gst_buffer_map (buffer, &map, GST_MAP_WRITE);
  gst_ladspa_transform (&ladspa->ladspa, map.data,
      ladspa->generate_samples_per_buffer, NULL);
  gst_buffer_unmap (buffer, &map);

  return GST_FLOW_OK;
}
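The fill vmethod above keeps byte offsets, sample counts, and timestamps consistent through the bytes-per-frame value and the sample rate. A short worked sketch of that arithmetic follows, using assumed (hypothetical) values rather than anything from the plugin itself.

/* Worked sketch with assumed values: 44100 Hz S16LE stereo, so
 * GST_AUDIO_INFO_BPF() would yield 2 bytes * 2 channels = 4. */
static GstClockTime
bpf_arithmetic_sketch (void)
{
  const gint rate = 44100, bpf = 4;
  guint64 offset = 176400;              /* byte offset into the stream */
  guint64 frames = offset / bpf;        /* 44100 frames */

  /* same scaling the fill vmethod uses: frames * GST_SECOND / rate */
  return gst_util_uint64_scale_int (frames, GST_SECOND, rate);  /* = 1 second */
}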
Example #30
static gboolean
gst_alsasrc_prepare (GstAudioSrc * asrc, GstAudioRingBufferSpec * spec)
{
  GstAlsaSrc *alsa;
  gint err;

  alsa = GST_ALSA_SRC (asrc);

  if (!alsasrc_parse_spec (alsa, spec))
    goto spec_parse;

  CHECK (snd_pcm_nonblock (alsa->handle, 0), non_block);

  CHECK (set_hwparams (alsa), hw_params_failed);
  CHECK (set_swparams (alsa), sw_params_failed);
  CHECK (snd_pcm_prepare (alsa->handle), prepare_failed);

  alsa->bpf = GST_AUDIO_INFO_BPF (&spec->info);
  spec->segsize = alsa->period_size * alsa->bpf;
  spec->segtotal = alsa->buffer_size / alsa->period_size;

  {
    snd_output_t *out_buf = NULL;
    char *msg = NULL;

    snd_output_buffer_open (&out_buf);
    snd_pcm_dump_hw_setup (alsa->handle, out_buf);
    snd_output_buffer_string (out_buf, &msg);
    GST_DEBUG_OBJECT (alsa, "Hardware setup: \n%s", msg);
    snd_output_close (out_buf);
    snd_output_buffer_open (&out_buf);
    snd_pcm_dump_sw_setup (alsa->handle, out_buf);
    snd_output_buffer_string (out_buf, &msg);
    GST_DEBUG_OBJECT (alsa, "Software setup: \n%s", msg);
    snd_output_close (out_buf);
  }

#ifdef SND_CHMAP_API_VERSION
  if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_RAW && alsa->channels < 9) {
    snd_pcm_chmap_t *chmap = snd_pcm_get_chmap (alsa->handle);
    if (chmap && chmap->channels == alsa->channels) {
      GstAudioChannelPosition pos[8];
      if (alsa_chmap_to_channel_positions (chmap, pos))
        gst_audio_ring_buffer_set_channel_positions (GST_AUDIO_BASE_SRC
            (alsa)->ringbuffer, pos);
    }
    free (chmap);
  }
#endif /* SND_CHMAP_API_VERSION */

  return TRUE;

  /* ERRORS */
spec_parse:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Error parsing spec"));
    return FALSE;
  }
non_block:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Could not set device to blocking: %s", snd_strerror (err)));
    return FALSE;
  }
hw_params_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Setting of hwparams failed: %s", snd_strerror (err)));
    return FALSE;
  }
sw_params_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Setting of swparams failed: %s", snd_strerror (err)));
    return FALSE;
  }
prepare_failed:
  {
    GST_ELEMENT_ERROR (alsa, RESOURCE, SETTINGS, (NULL),
        ("Prepare failed: %s", snd_strerror (err)));
    return FALSE;
  }
}
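For reference, here is a worked sketch (with assumed values, not taken from the example) of the ring-buffer sizing that gst_alsasrc_prepare() derives from the bytes-per-frame value.

/* Assumed: 48000 Hz S16LE stereo, so GST_AUDIO_INFO_BPF() gives 4 bytes per
 * frame; suppose ALSA negotiated period_size = 1024 frames and
 * buffer_size = 4096 frames.  Then:
 *
 *   spec->segsize  = period_size * bpf         = 1024 * 4    = 4096 bytes
 *   spec->segtotal = buffer_size / period_size = 4096 / 1024 = 4 segments
 */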