/**
 * gst_audio_info_is_equal:
 * @info: a #GstAudioInfo
 * @other: a #GstAudioInfo
 *
 * Compares two #GstAudioInfo and returns whether they are equal or not.
 *
 * Returns: %TRUE if @info and @other are equal, else %FALSE.
 *
 * Since: 1.2
 */
gboolean
gst_audio_info_is_equal (const GstAudioInfo * info, const GstAudioInfo * other)
{
  if (info == other)
    return TRUE;
  if (info->finfo == NULL || other->finfo == NULL)
    return FALSE;
  if (GST_AUDIO_INFO_FORMAT (info) != GST_AUDIO_INFO_FORMAT (other))
    return FALSE;
  if (GST_AUDIO_INFO_FLAGS (info) != GST_AUDIO_INFO_FLAGS (other))
    return FALSE;
  if (GST_AUDIO_INFO_LAYOUT (info) != GST_AUDIO_INFO_LAYOUT (other))
    return FALSE;
  if (GST_AUDIO_INFO_RATE (info) != GST_AUDIO_INFO_RATE (other))
    return FALSE;
  if (GST_AUDIO_INFO_CHANNELS (info) != GST_AUDIO_INFO_CHANNELS (other))
    return FALSE;
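  /* the position arrays only cover up to 64 channels, so there is
   * nothing more to compare beyond that */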
  if (GST_AUDIO_INFO_CHANNELS (info) > 64)
    return TRUE;
  if (memcmp (info->position, other->position,
          GST_AUDIO_INFO_CHANNELS (info) * sizeof (GstAudioChannelPosition)) !=
      0)
    return FALSE;

  return TRUE;
}
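A minimal caller sketch (not from the library sources; assumes <gst/audio/audio.h> is included and gst_init() has been called): build two infos with gst_audio_info_set_format() and compare them.

static gboolean
audio_info_equal_example (void)
{
  GstAudioInfo a, b;

  gst_audio_info_init (&a);
  gst_audio_info_init (&b);

  /* same format, rate and channel count; NULL requests default positions */
  gst_audio_info_set_format (&a, GST_AUDIO_FORMAT_S16, 44100, 2, NULL);
  gst_audio_info_set_format (&b, GST_AUDIO_FORMAT_S16, 44100, 2, NULL);

  return gst_audio_info_is_equal (&a, &b);      /* TRUE */
}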
gboolean
gst_pulse_fill_format_info (GstAudioRingBufferSpec * spec, pa_format_info ** f,
    guint * channels)
{
  pa_format_info *format;
  pa_sample_format_t sf = PA_SAMPLE_INVALID;
  GstAudioInfo *ainfo = &spec->info;

  format = pa_format_info_new ();

  if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_MU_LAW
      && GST_AUDIO_INFO_WIDTH (ainfo) == 8) {
    format->encoding = PA_ENCODING_PCM;
    sf = PA_SAMPLE_ULAW;
  } else if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_A_LAW
      && GST_AUDIO_INFO_WIDTH (ainfo) == 8) {
    format->encoding = PA_ENCODING_PCM;
    sf = PA_SAMPLE_ALAW;
  } else if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_RAW) {
    format->encoding = PA_ENCODING_PCM;
    if (!gstaudioformat_to_pasampleformat (GST_AUDIO_INFO_FORMAT (ainfo), &sf))
      goto fail;
  } else if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_AC3) {
    format->encoding = PA_ENCODING_AC3_IEC61937;
  } else if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_EAC3) {
    format->encoding = PA_ENCODING_EAC3_IEC61937;
  } else if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_DTS) {
    format->encoding = PA_ENCODING_DTS_IEC61937;
  } else if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_MPEG) {
    format->encoding = PA_ENCODING_MPEG_IEC61937;
  } else {
    goto fail;
  }

  if (format->encoding == PA_ENCODING_PCM) {
    pa_format_info_set_sample_format (format, sf);
    pa_format_info_set_channels (format, GST_AUDIO_INFO_CHANNELS (ainfo));
  }

  pa_format_info_set_rate (format, GST_AUDIO_INFO_RATE (ainfo));

  if (!pa_format_info_valid (format))
    goto fail;

  *f = format;
  *channels = GST_AUDIO_INFO_CHANNELS (ainfo);

  return TRUE;

fail:
  if (format)
    pa_format_info_free (format);
  return FALSE;
}
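A hedged caller sketch for the helper above (spec stands for an already negotiated GstAudioRingBufferSpec): on success the returned pa_format_info is owned by the caller and must be released with pa_format_info_free().

static void
fill_format_info_example (GstAudioRingBufferSpec * spec)
{
  pa_format_info *f = NULL;
  guint channels = 0;

  if (gst_pulse_fill_format_info (spec, &f, &channels)) {
    /* ... pass f on (e.g. to pa_stream_new_extended()) before freeing it ... */
    pa_format_info_free (f);
  }
}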
static gboolean
gst_openal_src_prepare (GstAudioSrc * audiosrc, GstAudioRingBufferSpec * spec)
{
  GstOpenalSrc *openalsrc = GST_OPENAL_SRC (audiosrc);

  gst_openal_src_parse_spec (openalsrc, spec);
  if (openalsrc->format == AL_NONE) {
    GST_ELEMENT_ERROR (openalsrc, RESOURCE, SETTINGS, (NULL),
        ("Unable to get type %d, format %d, and %d channels", spec->type,
            GST_AUDIO_INFO_FORMAT (&spec->info),
            GST_AUDIO_INFO_CHANNELS (&spec->info)));
    return FALSE;
  }

  openalsrc->device =
      alcCaptureOpenDevice (openalsrc->default_device, openalsrc->rate,
      openalsrc->format, openalsrc->buffer_length);

  if (!openalsrc->device) {
    GST_ELEMENT_ERROR (openalsrc, RESOURCE, OPEN_READ,
        ("Could not open device."), GST_ALC_ERROR (openalsrc->device));
    return FALSE;
  }

  openalsrc->default_device_name =
      g_strdup (alcGetString (openalsrc->device, ALC_DEVICE_SPECIFIER));

  alcCaptureStart (openalsrc->device);

  return TRUE;
}
static gboolean
gst_level_set_caps (GstBaseTransform * trans, GstCaps * in, GstCaps * out)
{
  GstLevel *filter = GST_LEVEL (trans);
  GstAudioInfo info;
  gint i, channels;

  if (!gst_audio_info_from_caps (&info, in))
    return FALSE;

  switch (GST_AUDIO_INFO_FORMAT (&info)) {
    case GST_AUDIO_FORMAT_S8:
      filter->process = gst_level_calculate_gint8;
      break;
    case GST_AUDIO_FORMAT_S16:
      filter->process = gst_level_calculate_gint16;
      break;
    case GST_AUDIO_FORMAT_S32:
      filter->process = gst_level_calculate_gint32;
      break;
    case GST_AUDIO_FORMAT_F32:
      filter->process = gst_level_calculate_gfloat;
      break;
    case GST_AUDIO_FORMAT_F64:
      filter->process = gst_level_calculate_gdouble;
      break;
    default:
      filter->process = NULL;
      break;
  }

  filter->info = info;

  channels = GST_AUDIO_INFO_CHANNELS (&info);

  /* allocate channel variable arrays */
  g_free (filter->CS);
  g_free (filter->peak);
  g_free (filter->last_peak);
  g_free (filter->decay_peak);
  g_free (filter->decay_peak_base);
  g_free (filter->decay_peak_age);
  filter->CS = g_new (gdouble, channels);
  filter->peak = g_new (gdouble, channels);
  filter->last_peak = g_new (gdouble, channels);
  filter->decay_peak = g_new (gdouble, channels);
  filter->decay_peak_base = g_new (gdouble, channels);

  filter->decay_peak_age = g_new (GstClockTime, channels);

  for (i = 0; i < channels; ++i) {
    filter->CS[i] = filter->peak[i] = filter->last_peak[i] =
        filter->decay_peak[i] = filter->decay_peak_base[i] = 0.0;
    filter->decay_peak_age[i] = G_GUINT64_CONSTANT (0);
  }

  gst_level_recalc_interval_frames (filter);

  return TRUE;
}
static void
render_lines (GstAudioVisualizer * base, guint32 * vdata, gint16 * adata,
    guint num_samples)
{
  gint channels = GST_AUDIO_INFO_CHANNELS (&base->ainfo);
  guint i, c, s, x, y, oy;
  gfloat dx, dy;
  guint w = GST_VIDEO_INFO_WIDTH (&base->vinfo);
  guint h = GST_VIDEO_INFO_HEIGHT (&base->vinfo);
  gint x2, y2;

  /* draw lines */
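  /* map the sample index onto x and the signed 16-bit range onto y,
   * centred around oy */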
  dx = (gfloat) (w - 1) / (gfloat) num_samples;
  dy = (h - 1) / 65536.0;
  oy = (h - 1) / 2;
  for (c = 0; c < channels; c++) {
    s = c;
    x2 = 0;
    y2 = (guint) (oy + (gfloat) adata[s] * dy);
    for (i = 1; i < num_samples; i++) {
      x = (guint) ((gfloat) i * dx);
      y = (guint) (oy + (gfloat) adata[s] * dy);
      s += channels;
      draw_line_aa (vdata, x2, x, y2, y, w, 0x00FFFFFF);
      x2 = x;
      y2 = y;
    }
  }
}
static gboolean
gst_opus_enc_set_format (GstAudioEncoder * benc, GstAudioInfo * info)
{
  GstOpusEnc *enc;

  enc = GST_OPUS_ENC (benc);

  g_mutex_lock (enc->property_lock);

  enc->n_channels = GST_AUDIO_INFO_CHANNELS (info);
  enc->sample_rate = GST_AUDIO_INFO_RATE (info);
  gst_opus_enc_setup_channel_mappings (enc, info);
  GST_DEBUG_OBJECT (benc, "Setup with %d channels, %d Hz", enc->n_channels,
      enc->sample_rate);

  /* handle reconfigure */
  if (enc->state) {
    opus_multistream_encoder_destroy (enc->state);
    enc->state = NULL;
  }
  if (!gst_opus_enc_setup (enc)) {
    g_mutex_unlock (enc->property_lock);
    return FALSE;
  }

  enc->frame_samples = gst_opus_enc_get_frame_samples (enc);

  /* feedback to base class */
  gst_opus_enc_setup_base_class (enc, benc);

  g_mutex_unlock (enc->property_lock);

  return TRUE;
}
static gboolean
gst_audio_filter_template_setup (GstAudioFilter * filter,
    const GstAudioInfo * info)
{
  GstAudioFilterTemplate *filter_template;
  GstAudioFormat fmt;
  gint chans, rate;

  filter_template = GST_AUDIO_FILTER_TEMPLATE (filter);

  rate = GST_AUDIO_INFO_RATE (info);
  chans = GST_AUDIO_INFO_CHANNELS (info);
  fmt = GST_AUDIO_INFO_FORMAT (info);

  GST_INFO_OBJECT (filter_template, "format %d (%s), rate %d, %d channels",
      fmt, GST_AUDIO_INFO_NAME (info), rate, chans);

  /* if any setup needs to be done (like memory allocated), do it here */

  /* The audio filter base class also saves the audio info in
   * GST_AUDIO_FILTER_INFO(filter) so it's automatically available
   * later from there as well */

  return TRUE;
}
static gboolean
gst_audio_panorama_set_caps (GstBaseTransform * base, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstAudioPanorama *filter = GST_AUDIO_PANORAMA (base);
  GstAudioInfo info;

  /*GST_INFO ("incaps are %" GST_PTR_FORMAT, incaps); */
  if (!gst_audio_info_from_caps (&info, incaps))
    goto no_format;

  GST_DEBUG ("try to process %d input with %d channels",
      GST_AUDIO_INFO_FORMAT (&info), GST_AUDIO_INFO_CHANNELS (&info));

  if (!gst_audio_panorama_set_process_function (filter, &info))
    goto no_format;

  filter->info = info;

  return TRUE;

no_format:
  {
    GST_DEBUG ("invalid caps");
    return FALSE;
  }
}
static gboolean
gst_freeverb_set_caps (GstBaseTransform * base, GstCaps * incaps,
                       GstCaps * outcaps)
{
    GstFreeverb *filter = GST_FREEVERB (base);
    GstAudioInfo info;

    /*GST_INFO ("incaps are %" GST_PTR_FORMAT, incaps); */
    if (!gst_audio_info_from_caps (&info, incaps))
        goto no_format;

    GST_DEBUG ("try to process %d input with %d channels",
               GST_AUDIO_INFO_FORMAT (&info), GST_AUDIO_INFO_CHANNELS (&info));

    if (!gst_freeverb_set_process_function (filter, &info))
        goto no_format;

    filter->info = info;

    gst_freeverb_init_rev_model (filter);
    filter->drained = FALSE;
    GST_INFO_OBJECT (base, "model configured");

    return TRUE;

no_format:
    {
        GST_DEBUG ("invalid caps");
        return FALSE;
    }
}
/* get notified of caps and plug in the correct process function */
static gboolean
gst_audio_fx_base_fir_filter_setup (GstAudioFilter * base,
    const GstAudioInfo * info)
{
  GstAudioFXBaseFIRFilter *self = GST_AUDIO_FX_BASE_FIR_FILTER (base);

  g_mutex_lock (&self->lock);
  if (self->buffer) {
    gst_audio_fx_base_fir_filter_push_residue (self);
    g_free (self->buffer);
    self->buffer = NULL;
    self->buffer_fill = 0;
    self->buffer_length = 0;
    self->start_ts = GST_CLOCK_TIME_NONE;
    self->start_off = GST_BUFFER_OFFSET_NONE;
    self->nsamples_out = 0;
    self->nsamples_in = 0;
  }

  gst_audio_fx_base_fir_filter_select_process_function (self,
      GST_AUDIO_INFO_FORMAT (info), GST_AUDIO_INFO_CHANNELS (info));
  g_mutex_unlock (&self->lock);

  return (self->process != NULL);
}
static gboolean
gst_deinterleave_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
  GstDeinterleave *self = GST_DEINTERLEAVE (parent);

  gboolean res;

  res = gst_pad_query_default (pad, parent, query);

  if (res && GST_QUERY_TYPE (query) == GST_QUERY_DURATION) {
    GstFormat format;

    gint64 dur;

    gst_query_parse_duration (query, &format, &dur);

    /* Need to divide by the number of channels in byte format
     * to get the correct value. All other formats should be fine
     */
    if (format == GST_FORMAT_BYTES && dur != -1)
      gst_query_set_duration (query, format,
          dur / GST_AUDIO_INFO_CHANNELS (&self->audio_info));
  } else if (res && GST_QUERY_TYPE (query) == GST_QUERY_POSITION) {
    GstFormat format;

    gint64 pos;

    gst_query_parse_position (query, &format, &pos);

    /* Need to divide by the number of channels in byte format
     * to get the correct value. All other formats should be fine
     */
    if (format == GST_FORMAT_BYTES && pos != -1)
      gst_query_set_position (query, format,
          pos / GST_AUDIO_INFO_CHANNELS (&self->audio_info));
  } else if (res && GST_QUERY_TYPE (query) == GST_QUERY_CAPS) {
    GstCaps *filter, *caps;

    gst_query_parse_caps (query, &filter);
    caps = gst_deinterleave_sink_getcaps (pad, parent, filter);
    gst_query_set_caps_result (query, caps);
    gst_caps_unref (caps);
  }

  return res;
}
static void
render_color_lines (GstAudioVisualizer * base, guint32 * vdata,
    gint16 * adata, guint num_samples)
{
  GstWaveScope *scope = (GstWaveScope *) base;
  gint channels = GST_AUDIO_INFO_CHANNELS (&base->ainfo);
  guint i, c, s, x, y, oy;
  gfloat dx, dy;
  guint w = GST_VIDEO_INFO_WIDTH (&base->vinfo);
  guint h = GST_VIDEO_INFO_HEIGHT (&base->vinfo), h1 = h - 2;
  gdouble *flt = scope->flt;
  gint x2, y2, y3, y4;

  /* draw lines */
  dx = (gfloat) (w - 1) / (gfloat) num_samples;
  dy = (h - 1) / 65536.0;
  oy = (h - 1) / 2;
  for (c = 0; c < channels; c++) {
    s = c;

    /* do first pixels */
    x2 = 0;
    filter ((gfloat) adata[s]);

    y = (guint) (oy + flt[0] * dy);
    y2 = MIN (y, h1);

    y = (guint) (oy + flt[3] * dy);
    y3 = MIN (y, h1);

    y = (guint) (oy + (flt[4] + flt[5]) * dy);
    y4 = MIN (y, h1);

    for (i = 1; i < num_samples; i++) {
      x = (guint) ((gfloat) i * dx);
      filter ((gfloat) adata[s]);

      y = (guint) (oy + flt[0] * dy);
      y = MIN (y, h1);
      draw_line_aa (vdata, x2, x, y2, y, w, 0x00FF0000);
      y2 = y;

      y = (guint) (oy + flt[3] * dy);
      y = MIN (y, h1);
      draw_line_aa (vdata, x2, x, y3, y, w, 0x0000FF00);
      y3 = y;

      y = (guint) (oy + (flt[4] + flt[5]) * dy);
      y = MIN (y, h1);
      draw_line_aa (vdata, x2, x, y4, y, w, 0x000000FF);
      y4 = y;

      x2 = x;
      s += channels;
    }
    flt += 6;
  }
}
/* Must be called with transform lock! */
static void
alloc_history (GstIirEqualizer * equ, const GstAudioInfo * info)
{
    /* free + alloc = no memcpy */
    g_free (equ->history);
    equ->history =
        g_malloc0 (equ->history_size * GST_AUDIO_INFO_CHANNELS (info) *
                   equ->freq_band_count);
}
static gboolean
gst_wave_scope_setup (GstAudioVisualizer * bscope)
{
  GstWaveScope *scope = GST_WAVE_SCOPE (bscope);

  if (scope->flt)
    g_free (scope->flt);

  scope->flt = g_new0 (gdouble, 6 * GST_AUDIO_INFO_CHANNELS (&bscope->ainfo));

  return TRUE;
}
static gboolean
gst_audio_fx_base_iir_filter_setup (GstAudioFilter * base,
    const GstAudioInfo * info)
{
  GstAudioFXBaseIIRFilter *filter = GST_AUDIO_FX_BASE_IIR_FILTER (base);
  gboolean ret = TRUE;
  gint channels;

  g_mutex_lock (&filter->lock);
  switch (GST_AUDIO_INFO_FORMAT (info)) {
    case GST_AUDIO_FORMAT_F32:
      filter->process = (GstAudioFXBaseIIRFilterProcessFunc)
          process_32;
      break;
    case GST_AUDIO_FORMAT_F64:
      filter->process = (GstAudioFXBaseIIRFilterProcessFunc)
          process_64;
      break;
    default:
      ret = FALSE;
      break;
  }

  channels = GST_AUDIO_INFO_CHANNELS (info);

  if (channels != filter->nchannels) {
    guint i;
    GstAudioFXBaseIIRFilterChannelCtx *ctx;

    if (filter->channels) {
      for (i = 0; i < filter->nchannels; i++) {
        ctx = &filter->channels[i];

        g_free (ctx->x);
        g_free (ctx->y);
      }
      g_free (filter->channels);
    }

    filter->channels = g_new0 (GstAudioFXBaseIIRFilterChannelCtx, channels);
    for (i = 0; i < channels; i++) {
      ctx = &filter->channels[i];

      ctx->x = g_new0 (gdouble, filter->nb);
      ctx->y = g_new0 (gdouble, filter->na);
    }
    filter->nchannels = channels;
  }
  g_mutex_unlock (&filter->lock);

  return ret;
}
static gboolean
gst_celt_enc_set_format (GstAudioEncoder * benc, GstAudioInfo * info)
{
  GstCeltEnc *enc;
  GstCaps *otherpadcaps;

  enc = GST_CELT_ENC (benc);

  enc->channels = GST_AUDIO_INFO_CHANNELS (info);
  enc->rate = GST_AUDIO_INFO_RATE (info);

  /* handle reconfigure */
  if (enc->state) {
    celt_encoder_destroy (enc->state);
    enc->state = NULL;
  }
  if (enc->mode) {
    celt_mode_destroy (enc->mode);
    enc->mode = NULL;
  }
  memset (&enc->header, 0, sizeof (enc->header));

  otherpadcaps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (enc));
  if (otherpadcaps) {
    if (!gst_caps_is_empty (otherpadcaps)) {
      GstStructure *ps = gst_caps_get_structure (otherpadcaps, 0);
      gst_structure_get_int (ps, "frame-size", &enc->frame_size);
    }
    gst_caps_unref (otherpadcaps);
  }

  if (enc->requested_frame_size > 0)
    enc->frame_size = enc->requested_frame_size;

  GST_DEBUG_OBJECT (enc, "channels=%d rate=%d frame-size=%d",
      enc->channels, enc->rate, enc->frame_size);

  if (!gst_celt_enc_setup (enc))
    return FALSE;

  /* feedback to base class */
  gst_audio_encoder_set_latency (benc,
      gst_celt_enc_get_latency (enc), gst_celt_enc_get_latency (enc));
  gst_audio_encoder_set_frame_samples_min (benc, enc->frame_size);
  gst_audio_encoder_set_frame_samples_max (benc, enc->frame_size);
  gst_audio_encoder_set_frame_max (benc, 1);

  return TRUE;
}
static gboolean
gst_space_scope_render (GstAudioVisualizer * base, GstBuffer * audio,
                        GstVideoFrame * video)
{
    GstSpaceScope *scope = GST_SPACE_SCOPE (base);
    GstMapInfo amap;
    guint num_samples;

    gst_buffer_map (audio, &amap, GST_MAP_READ);

    num_samples =
        amap.size / (GST_AUDIO_INFO_CHANNELS (&base->ainfo) * sizeof (gint16));
    scope->process (base, (guint32 *) GST_VIDEO_FRAME_PLANE_DATA (video, 0),
                    (gint16 *) amap.data, num_samples);
    gst_buffer_unmap (audio, &amap);
    return TRUE;
}
static void
pcm_config_from_spec (struct pcm_config *config,
    const GstAudioRingBufferSpec * spec)
{
  gint64 frames;

  config->format = pcm_format_from_gst (GST_AUDIO_INFO_FORMAT (&spec->info));
  config->channels = GST_AUDIO_INFO_CHANNELS (&spec->info);
  config->rate = GST_AUDIO_INFO_RATE (&spec->info);

  gst_audio_info_convert (&spec->info,
      GST_FORMAT_TIME, spec->latency_time * GST_USECOND,
      GST_FORMAT_DEFAULT /* frames */ , &frames);

  config->period_size = frames;
  config->period_count = spec->buffer_time / spec->latency_time;
}
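A small sketch of the gst_audio_info_convert() call used above, outside the ring-buffer context (the function name is illustrative): for raw audio, GST_FORMAT_DEFAULT means sample frames.

static gint64
frames_for_duration_example (const GstAudioInfo * info)
{
  gint64 frames = 0;

  /* e.g. 10 ms at 48000 Hz yields 480 frames */
  gst_audio_info_convert (info, GST_FORMAT_TIME, 10 * GST_MSECOND,
      GST_FORMAT_DEFAULT, &frames);

  return frames;
}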
static void
gst_deinterleave_add_new_pads (GstDeinterleave * self, GstCaps * caps)
{
  GstPad *pad;
  guint i;

  for (i = 0; i < GST_AUDIO_INFO_CHANNELS (&self->audio_info); i++) {
    gchar *name = g_strdup_printf ("src_%u", i);
    GstCaps *srccaps;
    GstAudioInfo info;
    GstAudioFormat format = GST_AUDIO_INFO_FORMAT (&self->audio_info);
    gint rate = GST_AUDIO_INFO_RATE (&self->audio_info);
    GstAudioChannelPosition position = GST_AUDIO_CHANNEL_POSITION_MONO;
    CopyStickyEventsData data;

    /* Set channel position if we know it */
    if (self->keep_positions)
      position = GST_AUDIO_INFO_POSITION (&self->audio_info, i);

    gst_audio_info_init (&info);
    gst_audio_info_set_format (&info, format, rate, 1, &position);

    srccaps = gst_audio_info_to_caps (&info);

    pad = gst_pad_new_from_static_template (&src_template, name);
    g_free (name);

    gst_pad_use_fixed_caps (pad);
    gst_pad_set_query_function (pad,
        GST_DEBUG_FUNCPTR (gst_deinterleave_src_query));
    gst_pad_set_active (pad, TRUE);

    data.pad = pad;
    data.caps = srccaps;
    gst_pad_sticky_events_foreach (self->sink, copy_sticky_events, &data);
    if (data.caps)
      gst_pad_set_caps (pad, data.caps);
    gst_element_add_pad (GST_ELEMENT (self), pad);
    self->srcpads = g_list_prepend (self->srcpads, gst_object_ref (pad));

    gst_caps_unref (srccaps);
  }

  gst_element_no_more_pads (GST_ELEMENT (self));
  self->srcpads = g_list_reverse (self->srcpads);
}
static GstFlowReturn
gst_chromaprint_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
  GstChromaprint *chromaprint = GST_CHROMAPRINT (trans);
  GstAudioFilter *filter = GST_AUDIO_FILTER (trans);
  GstMapInfo map_info;
  guint nsamples;
  gint rate, channels;

  rate = GST_AUDIO_INFO_RATE (&filter->info);
  channels = GST_AUDIO_INFO_CHANNELS (&filter->info);

  if (G_UNLIKELY (rate <= 0 || channels <= 0))
    return GST_FLOW_NOT_NEGOTIATED;

  if (!chromaprint->record)
    return GST_FLOW_OK;

  if (!gst_buffer_map (buf, &map_info, GST_MAP_READ))
    return GST_FLOW_ERROR;

  /* 2 bytes per sample: the element operates on interleaved S16 audio */
  nsamples = map_info.size / (channels * 2);

  if (nsamples == 0)
    goto end;

  if (chromaprint->nsamples == 0) {
    chromaprint_start (chromaprint->context, rate, channels);
  }
  chromaprint->nsamples += nsamples;
  chromaprint->duration = chromaprint->nsamples / rate;

  chromaprint_feed (chromaprint->context, map_info.data,
      map_info.size / sizeof (guint16));

  if (chromaprint->duration >= chromaprint->max_duration
      && !chromaprint->fingerprint) {
    gst_chromaprint_create_fingerprint (chromaprint);
  }

end:
  gst_buffer_unmap (buf, &map_info);

  return GST_FLOW_OK;
}
static gboolean
gst_freeverb_set_process_function (GstFreeverb * filter, GstAudioInfo * info)
{
    gint channel_index, format_index;
    const GstAudioFormatInfo *finfo = info->finfo;

    /* set processing function */
    channel_index = GST_AUDIO_INFO_CHANNELS (info) - 1;
    if (channel_index > 1 || channel_index < 0) {
        filter->process = NULL;
        return FALSE;
    }

    format_index = GST_AUDIO_FORMAT_INFO_IS_FLOAT (finfo) ? 1 : 0;

    filter->process = process_functions[channel_index][format_index];
    return TRUE;
}
static GstFlowReturn
gst_deinterleave_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
  GstDeinterleave *self = GST_DEINTERLEAVE (parent);
  GstFlowReturn ret;

  g_return_val_if_fail (self->func != NULL, GST_FLOW_NOT_NEGOTIATED);
  g_return_val_if_fail (GST_AUDIO_INFO_WIDTH (&self->audio_info) > 0,
      GST_FLOW_NOT_NEGOTIATED);
  g_return_val_if_fail (GST_AUDIO_INFO_CHANNELS (&self->audio_info) > 0,
      GST_FLOW_NOT_NEGOTIATED);

  ret = gst_deinterleave_process (self, buffer);

  if (ret != GST_FLOW_OK)
    GST_DEBUG_OBJECT (self, "flow return: %s", gst_flow_get_name (ret));

  return ret;
}
static gboolean
gst_vorbis_enc_set_format (GstAudioEncoder * enc, GstAudioInfo * info)
{
  GstVorbisEnc *vorbisenc;

  vorbisenc = GST_VORBISENC (enc);

  vorbisenc->channels = GST_AUDIO_INFO_CHANNELS (info);
  vorbisenc->frequency = GST_AUDIO_INFO_RATE (info);

  /* if re-configured, we were drained and cleared already */
  if (!gst_vorbis_enc_setup (vorbisenc))
    return FALSE;

  /* feedback to base class */
  gst_audio_encoder_set_latency (enc,
      gst_vorbis_enc_get_latency (vorbisenc),
      gst_vorbis_enc_get_latency (vorbisenc));

  return TRUE;
}
GstAudioRingBufferSpec *
gst_pulse_channel_map_to_gst (const pa_channel_map * map,
    GstAudioRingBufferSpec * spec)
{
  gint i, j;
  gboolean invalid = FALSE;
  gint channels;
  GstAudioChannelPosition *pos;

  channels = GST_AUDIO_INFO_CHANNELS (&spec->info);

  g_return_val_if_fail (map->channels == channels, NULL);

  pos = spec->info.position;

  for (j = 0; j < channels; j++) {
    for (i = 0; i < G_N_ELEMENTS (gst_pa_pos_table); i++) {
      if (map->map[j] == gst_pa_pos_table[i].pa_pos) {
        pos[j] = gst_pa_pos_table[i].gst_pos;
        break;
      }
    }
    if (i == G_N_ELEMENTS (gst_pa_pos_table))
      return NULL;
  }

  if (!invalid
      && !gst_audio_check_valid_channel_positions (pos, channels, FALSE))
    invalid = TRUE;

  if (invalid) {
    for (i = 0; i < channels; i++)
      pos[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
  } else {
    if (pos[0] != GST_AUDIO_CHANNEL_POSITION_NONE)
      spec->info.flags &= ~GST_AUDIO_FLAG_UNPOSITIONED;
  }

  return spec;
}
static gboolean
gst_speex_enc_set_format (GstAudioEncoder * benc, GstAudioInfo * info)
{
  GstSpeexEnc *enc;

  enc = GST_SPEEX_ENC (benc);

  enc->channels = GST_AUDIO_INFO_CHANNELS (info);
  enc->rate = GST_AUDIO_INFO_RATE (info);

  /* handle reconfigure */
  if (enc->state) {
    speex_encoder_destroy (enc->state);
    enc->state = NULL;
  }

  if (!gst_speex_enc_setup (enc))
    return FALSE;

  /* feedback to base class */
  gst_audio_encoder_set_latency (benc,
      gst_speex_enc_get_latency (enc), gst_speex_enc_get_latency (enc));
  gst_audio_encoder_set_lookahead (benc, enc->lookahead);

  if (enc->nframes == 0) {
    /* as many frames as available input allows */
    gst_audio_encoder_set_frame_samples_min (benc, enc->frame_size);
    gst_audio_encoder_set_frame_samples_max (benc, enc->frame_size);
    gst_audio_encoder_set_frame_max (benc, 0);
  } else {
    /* exactly as many frames as configured */
    gst_audio_encoder_set_frame_samples_min (benc,
        enc->frame_size * enc->nframes);
    gst_audio_encoder_set_frame_samples_max (benc,
        enc->frame_size * enc->nframes);
    gst_audio_encoder_set_frame_max (benc, 1);
  }

  return TRUE;
}
static gboolean
gst_ce_mp3_enc_set_src_caps (GstCeAudEnc * ceaudenc, GstAudioInfo * info,
    GstCaps ** caps, GstBuffer ** codec_data)
{
  GstCeMp3Enc *mp3enc = GST_CE_MP3ENC (ceaudenc);
  const gchar *mpegversion = NULL;
  gboolean ret = TRUE;

  ITTIAM_MP3ENC_Params *params;

  mp3enc->channels = GST_AUDIO_INFO_CHANNELS (info);
  GST_INFO_OBJECT (mp3enc, "Set src channels to %i", mp3enc->channels);
  mp3enc->rate = GST_AUDIO_INFO_RATE (info);
  if (mp3enc->rate >= 16000 && mp3enc->rate <= 24000) {
    GST_DEBUG_OBJECT (mp3enc, "Setting samples per frame to 576");
    gst_ce_audenc_set_frame_samples (ceaudenc, 576, 576);
  } else {
    GST_DEBUG_OBJECT (mp3enc, "Setting samples per frame to 1152");
    gst_ce_audenc_set_frame_samples (ceaudenc, 1152, 1152);
  }
  return ret;
}
gboolean
gst_pulse_fill_sample_spec (GstAudioRingBufferSpec * spec, pa_sample_spec * ss)
{
  if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_RAW) {
    if (!gstaudioformat_to_pasampleformat (GST_AUDIO_INFO_FORMAT (&spec->info),
            &ss->format))
      return FALSE;
  } else if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_MU_LAW) {
    ss->format = PA_SAMPLE_ULAW;
  } else if (spec->type == GST_AUDIO_RING_BUFFER_FORMAT_TYPE_A_LAW) {
    ss->format = PA_SAMPLE_ALAW;
  } else
    return FALSE;

  ss->channels = GST_AUDIO_INFO_CHANNELS (&spec->info);
  ss->rate = GST_AUDIO_INFO_RATE (&spec->info);

  if (!pa_sample_spec_valid (ss))
    return FALSE;

  return TRUE;
}
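A hedged caller sketch for the helper above: fill a pa_sample_spec from the negotiated spec and log it with PulseAudio's pa_sample_spec_snprint().

static void
fill_sample_spec_example (GstAudioRingBufferSpec * spec)
{
  pa_sample_spec ss;
  gchar buf[PA_SAMPLE_SPEC_SNPRINT_MAX];

  if (gst_pulse_fill_sample_spec (spec, &ss))
    GST_DEBUG ("negotiated sample spec: %s",
        pa_sample_spec_snprint (buf, sizeof (buf), &ss));
}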
static void
render_color_dots (GstAudioVisualizer * base, guint32 * vdata,
    gint16 * adata, guint num_samples)
{
  GstWaveScope *scope = (GstWaveScope *) base;
  gint channels = GST_AUDIO_INFO_CHANNELS (&base->ainfo);
  guint i, c, s, x, y, oy;
  gfloat dx, dy;
  guint w = GST_VIDEO_INFO_WIDTH (&base->vinfo);
  guint h = GST_VIDEO_INFO_HEIGHT (&base->vinfo), h1 = h - 2;
  gdouble *flt = scope->flt;

  /* draw dots */
  dx = (gfloat) w / (gfloat) num_samples;
  dy = h / 65536.0;
  oy = h / 2;
  for (c = 0; c < channels; c++) {
    s = c;
    for (i = 0; i < num_samples; i++) {
      x = (guint) ((gfloat) i * dx);
      filter ((gfloat) adata[s]);

      y = (guint) (oy + flt[0] * dy);
      y = MIN (y, h1);
      draw_dot_c (vdata, x, y, w, 0x00FF0000);

      y = (guint) (oy + flt[3] * dy);
      y = MIN (y, h1);
      draw_dot_c (vdata, x, y, w, 0x0000FF00);

      y = (guint) (oy + (flt[4] + flt[5]) * dy);
      y = MIN (y, h1);
      draw_dot_c (vdata, x, y, w, 0x000000FF);

      s += channels;
    }
    flt += 6;
  }
}
static gboolean
gst_audio_panorama_set_process_function (GstAudioPanorama * filter,
    GstAudioInfo * info)
{
  gint channel_index, format_index, method_index;
  const GstAudioFormatInfo *finfo = info->finfo;

  /* set processing function */
  channel_index = GST_AUDIO_INFO_CHANNELS (info) - 1;
  if (channel_index > 1 || channel_index < 0) {
    filter->process = NULL;
    return FALSE;
  }

  format_index = GST_AUDIO_FORMAT_INFO_IS_FLOAT (finfo) ? 1 : 0;

  method_index = filter->method;
  if (method_index >= NUM_METHODS || method_index < 0)
    method_index = METHOD_PSYCHOACOUSTIC;

  filter->process =
      panorama_process_functions[channel_index][format_index][method_index];
  return TRUE;
}
static gboolean
gst_amrnbenc_set_format (GstAudioEncoder * enc, GstAudioInfo * info)
{
  GstAmrnbEnc *amrnbenc;
  GstCaps *copy;

  amrnbenc = GST_AMRNBENC (enc);

  /* parameters already parsed for us */
  amrnbenc->rate = GST_AUDIO_INFO_RATE (info);
  amrnbenc->channels = GST_AUDIO_INFO_CHANNELS (info);

  /* we do not really accept other input, but anyway ... */
  /* this is not wrong but will sound bad */
  if (amrnbenc->channels != 1) {
    g_warning ("amrnbenc is only optimized for mono channels");
  }
  if (amrnbenc->rate != 8000) {
    g_warning ("amrnbenc is only optimized for 8000 Hz samplerate");
  }

  /* create reverse caps */
  copy = gst_caps_new_simple ("audio/AMR",
      "channels", G_TYPE_INT, amrnbenc->channels,
      "rate", G_TYPE_INT, amrnbenc->rate, NULL);

  gst_audio_encoder_set_output_format (GST_AUDIO_ENCODER (amrnbenc), copy);
  gst_caps_unref (copy);

  /* report needs to base class: hand one frame at a time */
  gst_audio_encoder_set_frame_samples_min (enc, 160);
  gst_audio_encoder_set_frame_samples_max (enc, 160);
  gst_audio_encoder_set_frame_max (enc, 1);

  return TRUE;
}