Example no. 1
static void
gst_level_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstLevel *filter = GST_LEVEL (object);

  switch (prop_id) {
    case PROP_POST_MESSAGES:
      /* fall-through */
    case PROP_MESSAGE:
      filter->post_messages = g_value_get_boolean (value);
      break;
    case PROP_INTERVAL:
      filter->interval = g_value_get_uint64 (value);
      if (GST_AUDIO_INFO_RATE (&filter->info)) {
        filter->interval_frames =
            GST_CLOCK_TIME_TO_FRAMES (filter->interval,
            GST_AUDIO_INFO_RATE (&filter->info));
      }
      break;
    case PROP_PEAK_TTL:
      filter->decay_peak_ttl =
          gst_guint64_to_gdouble (g_value_get_uint64 (value));
      break;
    case PROP_PEAK_FALLOFF:
      filter->decay_peak_falloff = g_value_get_double (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
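All of the examples in this listing revolve around the GST_CLOCK_TIME_TO_FRAMES macro from gst-plugins-base's audio library, which scales a nanosecond duration to a number of sample frames (roughly duration * rate / GST_SECOND, with rounding). A minimal, self-contained sketch of the conversion, using hypothetical values chosen only for illustration:

#include <gst/gst.h>
#include <gst/audio/audio.h>

int
main (void)
{
  /* hypothetical values, for illustration only */
  GstClockTime interval = 100 * GST_MSECOND;    /* 100 ms */
  guint rate = 44100;                           /* sample rate in Hz */
  guint64 frames;

  /* scale the nanosecond duration to sample frames:
   * 100 ms at 44100 Hz -> 4410 frames */
  frames = GST_CLOCK_TIME_TO_FRAMES (interval, rate);
  g_print ("%" G_GUINT64_FORMAT " frames\n", frames);

  return 0;
}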
Example no. 2
static GstFlowReturn
gst_atdec_handle_frame (GstAudioDecoder * decoder, GstBuffer * buffer)
{
  AudioTimeStamp timestamp = { 0 };
  AudioStreamPacketDescription packet;
  AudioQueueBufferRef input_buffer, output_buffer;
  GstBuffer *out;
  GstMapInfo info;
  GstAudioInfo *audio_info;
  int size, out_frames;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstATDec *atdec = GST_ATDEC (decoder);

  if (buffer == NULL)
    return GST_FLOW_OK;

  audio_info = gst_audio_decoder_get_audio_info (decoder);

  /* copy the input buffer into an AudioQueueBuffer */
  size = gst_buffer_get_size (buffer);
  AudioQueueAllocateBuffer (atdec->queue, size, &input_buffer);
  gst_buffer_extract (buffer, 0, input_buffer->mAudioData, size);
  input_buffer->mAudioDataByteSize = size;

  /* assume framed input */
  packet.mStartOffset = 0;
  packet.mVariableFramesInPacket = 1;
  packet.mDataByteSize = size;

  /* enqueue the buffer. It will get free'd once the gst_atdec_buffer_emptied
   * callback is called
   */
  AudioQueueEnqueueBuffer (atdec->queue, input_buffer, 1, &packet);

  /* figure out how many frames we need to pull out of the queue */
  out_frames = GST_CLOCK_TIME_TO_FRAMES (GST_BUFFER_DURATION (buffer),
      audio_info->rate);
  size = out_frames * audio_info->bpf;
  AudioQueueAllocateBuffer (atdec->queue, size, &output_buffer);

  /* pull the frames */
  AudioQueueOfflineRender (atdec->queue, &timestamp, output_buffer, out_frames);
  if (output_buffer->mAudioDataByteSize) {
    out =
        gst_audio_decoder_allocate_output_buffer (decoder,
        output_buffer->mAudioDataByteSize);

    gst_buffer_map (out, &info, GST_MAP_WRITE);
    memcpy (info.data, output_buffer->mAudioData,
        output_buffer->mAudioDataByteSize);
    gst_buffer_unmap (out, &info);

    flow_ret = gst_audio_decoder_finish_frame (decoder, out, 1);
  }

  AudioQueueFreeBuffer (atdec->queue, output_buffer);

  return flow_ret;
}
Example no. 3
GstFlowReturn AudioFileReader::handleBuffer(GstAppSink* sink)
{
    GstBuffer* buffer = gst_app_sink_pull_buffer(sink);
    if (!buffer)
        return GST_FLOW_ERROR;

    GstCaps* caps = gst_buffer_get_caps(buffer);
    GstStructure* structure = gst_caps_get_structure(caps, 0);

    gint channels = 0;
    if (!gst_structure_get_int(structure, "channels", &channels) || !channels) {
        gst_caps_unref(caps);
        gst_buffer_unref(buffer);
        return GST_FLOW_ERROR;
    }

    gint sampleRate = 0;
    if (!gst_structure_get_int(structure, "rate", &sampleRate) || !sampleRate) {
        gst_caps_unref(caps);
        gst_buffer_unref(buffer);
        return GST_FLOW_ERROR;
    }

    gint width = 0;
    if (!gst_structure_get_int(structure, "width", &width) || !width) {
        gst_caps_unref(caps);
        gst_buffer_unref(buffer);
        return GST_FLOW_ERROR;
    }

    GstClockTime duration = (static_cast<guint64>(GST_BUFFER_SIZE(buffer)) * 8 * GST_SECOND) / (sampleRate * channels * width);
    int frames = GST_CLOCK_TIME_TO_FRAMES(duration, sampleRate);

    // Check the first audio channel. The buffer is supposed to store
    // data of a single channel anyway.
    GstAudioChannelPosition* positions = gst_audio_get_channel_positions(structure);
    switch (positions[0]) {
    case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
        gst_buffer_list_iterator_add(m_frontLeftBuffersIterator, buffer);
        m_channelSize += frames;
        break;
    case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        gst_buffer_list_iterator_add(m_frontRightBuffersIterator, buffer);
        break;
    default:
        gst_buffer_unref(buffer);
        break;
    }

    g_free(positions);
    gst_caps_unref(caps);
    return GST_FLOW_OK;
}
Example no. 4
GstFlowReturn AudioFileReader::handleSample(GstAppSink* sink)
{
    GstSample* sample = gst_app_sink_pull_sample(sink);
    if (!sample)
        return GST_FLOW_ERROR;

    GstBuffer* buffer = gst_sample_get_buffer(sample);
    if (!buffer) {
        gst_sample_unref(sample);
        return GST_FLOW_ERROR;
    }

    GstCaps* caps = gst_sample_get_caps(sample);
    if (!caps) {
        gst_sample_unref(sample);
        return GST_FLOW_ERROR;
    }

    GstAudioInfo info;
    gst_audio_info_from_caps(&info, caps);
    int frames = GST_CLOCK_TIME_TO_FRAMES(GST_BUFFER_DURATION(buffer), GST_AUDIO_INFO_RATE(&info));

    // Check the first audio channel. The buffer is supposed to store
    // data of a single channel anyway.
    switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
    case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
        gst_buffer_list_add(m_frontLeftBuffers, gst_buffer_ref(buffer));
        m_channelSize += frames;
        break;
    case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        gst_buffer_list_add(m_frontRightBuffers, gst_buffer_ref(buffer));
        break;
    default:
        break;
    }

    gst_sample_unref(sample);
    return GST_FLOW_OK;
}
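This variant appears to be the GStreamer 1.0 counterpart of handleBuffer above: gst_app_sink_pull_sample() and GstSample replace the 0.10-era gst_app_sink_pull_buffer(), and gst_buffer_list_add() replaces the old buffer-list iterator API.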
Example no. 5
static void
gst_level_recalc_interval_frames (GstLevel * level)
{
  GstClockTime interval = level->interval;
  guint sample_rate = GST_AUDIO_INFO_RATE (&level->info);
  guint interval_frames;

  interval_frames = GST_CLOCK_TIME_TO_FRAMES (interval, sample_rate);

  if (interval_frames == 0) {
    GST_WARNING_OBJECT (level, "interval %" GST_TIME_FORMAT " is too small, "
        "should be at least %" GST_TIME_FORMAT " for sample rate %u",
        GST_TIME_ARGS (interval),
        GST_TIME_ARGS (GST_FRAMES_TO_CLOCK_TIME (1, sample_rate)), sample_rate);
    interval_frames = 1;
  }

  level->interval_frames = interval_frames;

  GST_INFO_OBJECT (level, "interval_frames now %u for interval "
      "%" GST_TIME_FORMAT " and sample rate %u", interval_frames,
      GST_TIME_ARGS (interval), sample_rate);
}
Example no. 6
static GstFlowReturn
gst_wavpack_enc_chain (GstPad * pad, GstBuffer * buf)
{
  GstWavpackEnc *enc = GST_WAVPACK_ENC (gst_pad_get_parent (pad));
  uint32_t sample_count = GST_BUFFER_SIZE (buf) / 4;
  GstFlowReturn ret;

  /* reset the last returns to GST_FLOW_OK. These are only set to something
   * else while WavpackPackSamples(), or more specifically
   * gst_wavpack_enc_push_block(), is running, so they are not valid anymore */
  enc->srcpad_last_return = enc->wvcsrcpad_last_return = GST_FLOW_OK;

  GST_DEBUG ("got %u raw samples", sample_count);

  /* check if we already have a valid WavpackContext, otherwise make one */
  if (!enc->wp_context) {
    /* create raw context */
    enc->wp_context =
        WavpackOpenFileOutput (gst_wavpack_enc_push_block, &enc->wv_id,
        (enc->correction_mode > 0) ? &enc->wvc_id : NULL);
    if (!enc->wp_context) {
      GST_ELEMENT_ERROR (enc, LIBRARY, INIT, (NULL),
          ("error creating Wavpack context"));
      gst_object_unref (enc);
      gst_buffer_unref (buf);
      return GST_FLOW_ERROR;
    }

    /* set the WavpackConfig according to our parameters */
    gst_wavpack_enc_set_wp_config (enc);

    /* set the configuration to the context now that we know everything
     * and initialize the encoder */
    if (!WavpackSetConfiguration (enc->wp_context,
            enc->wp_config, (uint32_t) (-1))
        || !WavpackPackInit (enc->wp_context)) {
      GST_ELEMENT_ERROR (enc, LIBRARY, SETTINGS, (NULL),
          ("error setting up wavpack encoding context"));
      WavpackCloseFile (enc->wp_context);
      gst_object_unref (enc);
      gst_buffer_unref (buf);
      return GST_FLOW_ERROR;
    }
    GST_DEBUG ("setup of encoding context successfull");
  }

  /* Save the timestamp of the first buffer. This will be later
   * used as offset for all following buffers */
  if (enc->timestamp_offset == GST_CLOCK_TIME_NONE) {
    if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
      enc->timestamp_offset = GST_BUFFER_TIMESTAMP (buf);
      enc->next_ts = GST_BUFFER_TIMESTAMP (buf);
    } else {
      enc->timestamp_offset = 0;
      enc->next_ts = 0;
    }
  }

  /* Check if we have a continuous stream; if not, drop some samples or the
   * whole buffer, or insert some silence samples */
  if (enc->next_ts != GST_CLOCK_TIME_NONE &&
      GST_BUFFER_TIMESTAMP (buf) < enc->next_ts) {
    guint64 diff = enc->next_ts - GST_BUFFER_TIMESTAMP (buf);
    guint64 diff_bytes;

    GST_WARNING_OBJECT (enc, "Buffer is older than previous "
        "timestamp + duration (%" GST_TIME_FORMAT "< %" GST_TIME_FORMAT
        "), cannot handle. Clipping buffer.",
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
        GST_TIME_ARGS (enc->next_ts));

    diff_bytes =
        GST_CLOCK_TIME_TO_FRAMES (diff, enc->samplerate) * enc->channels * 2;
    if (diff_bytes >= GST_BUFFER_SIZE (buf)) {
      gst_buffer_unref (buf);
      return GST_FLOW_OK;
    }
    buf = gst_buffer_make_metadata_writable (buf);
    GST_BUFFER_DATA (buf) += diff_bytes;
    GST_BUFFER_SIZE (buf) -= diff_bytes;

    GST_BUFFER_TIMESTAMP (buf) += diff;
    if (GST_BUFFER_DURATION_IS_VALID (buf))
      GST_BUFFER_DURATION (buf) -= diff;
  }

  /* Allow a diff of at most 5 ms */
  if (enc->next_ts != GST_CLOCK_TIME_NONE
      && GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
    if (GST_BUFFER_TIMESTAMP (buf) != enc->next_ts &&
        GST_BUFFER_TIMESTAMP (buf) - enc->next_ts > 5 * GST_MSECOND) {
      GST_WARNING_OBJECT (enc,
          "Discontinuity detected: %" G_GUINT64_FORMAT " > %" G_GUINT64_FORMAT,
          GST_BUFFER_TIMESTAMP (buf) - enc->next_ts, 5 * GST_MSECOND);

      WavpackFlushSamples (enc->wp_context);
      enc->timestamp_offset += (GST_BUFFER_TIMESTAMP (buf) - enc->next_ts);
    }
  }

  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)
      && GST_BUFFER_DURATION_IS_VALID (buf))
    enc->next_ts = GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf);
  else
    enc->next_ts = GST_CLOCK_TIME_NONE;

  if (enc->need_channel_remap) {
    buf = gst_buffer_make_writable (buf);
    gst_wavpack_enc_fix_channel_order (enc, (gint32 *) GST_BUFFER_DATA (buf),
        sample_count);
  }

  /* if we want to append the MD5 sum to the stream update it here
   * with the current raw samples */
  if (enc->md5) {
    g_checksum_update (enc->md5_context, GST_BUFFER_DATA (buf),
        GST_BUFFER_SIZE (buf));
  }

  /* encode and handle return values from encoding */
  if (WavpackPackSamples (enc->wp_context, (int32_t *) GST_BUFFER_DATA (buf),
          sample_count / enc->channels)) {
    GST_DEBUG ("encoding samples successful");
    ret = GST_FLOW_OK;
  } else {
    if ((enc->srcpad_last_return == GST_FLOW_RESEND) ||
        (enc->wvcsrcpad_last_return == GST_FLOW_RESEND)) {
      ret = GST_FLOW_RESEND;
    } else if ((enc->srcpad_last_return == GST_FLOW_OK) ||
        (enc->wvcsrcpad_last_return == GST_FLOW_OK)) {
      ret = GST_FLOW_OK;
    } else if ((enc->srcpad_last_return == GST_FLOW_NOT_LINKED) &&
        (enc->wvcsrcpad_last_return == GST_FLOW_NOT_LINKED)) {
      ret = GST_FLOW_NOT_LINKED;
    } else if ((enc->srcpad_last_return == GST_FLOW_WRONG_STATE) &&
        (enc->wvcsrcpad_last_return == GST_FLOW_WRONG_STATE)) {
      ret = GST_FLOW_WRONG_STATE;
    } else {
      GST_ELEMENT_ERROR (enc, LIBRARY, ENCODE, (NULL),
          ("encoding samples failed"));
      ret = GST_FLOW_ERROR;
    }
  }

  gst_buffer_unref (buf);
  gst_object_unref (enc);
  return ret;
}
Example no. 7
static GstFlowReturn
gst_speex_enc_chain (GstPad * pad, GstBuffer * buf)
{
  GstSpeexEnc *enc;
  GstFlowReturn ret = GST_FLOW_OK;

  enc = GST_SPEEX_ENC (GST_PAD_PARENT (pad));

  if (!enc->setup)
    goto not_setup;

  if (!enc->header_sent) {
    /* Speex streams begin with two headers: the initial header (with
       most of the codec setup parameters), which is mandated by the Ogg
       bitstream spec, and a second header that holds any comment fields.
       We merely need to make the headers, then pass them to libspeex
       one at a time; libspeex handles the additional Ogg bitstream
       constraints */
    GstBuffer *buf1, *buf2;
    GstCaps *caps;
    guchar *data;
    gint data_len;

    /* create header buffer */
    data = (guint8 *) speex_header_to_packet (&enc->header, &data_len);
    buf1 = gst_speex_enc_buffer_from_data (enc, data, data_len, 0);
    free (data);

    /* create comment buffer */
    buf2 = gst_speex_enc_create_metadata_buffer (enc);

    /* mark and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = gst_speex_enc_set_header_on_caps (caps, buf1, buf2);

    gst_caps_set_simple (caps,
        "rate", G_TYPE_INT, enc->rate,
        "channels", G_TYPE_INT, enc->channels, NULL);

    /* negotiate with these caps */
    GST_DEBUG_OBJECT (enc, "here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    gst_buffer_set_caps (buf1, caps);
    gst_buffer_set_caps (buf2, caps);
    gst_caps_unref (caps);

    /* push out buffers */
    ret = gst_speex_enc_push_buffer (enc, buf1);

    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf2);
      goto done;
    }

    ret = gst_speex_enc_push_buffer (enc, buf2);

    if (ret != GST_FLOW_OK)
      goto done;

    speex_bits_reset (&enc->bits);

    enc->header_sent = TRUE;
  }

  /* Save the timestamp of the first buffer. This will be later
   * used as offset for all following buffers */
  if (enc->start_ts == GST_CLOCK_TIME_NONE) {
    if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
      enc->start_ts = GST_BUFFER_TIMESTAMP (buf);
      enc->granulepos_offset = gst_util_uint64_scale
          (GST_BUFFER_TIMESTAMP (buf), enc->rate, GST_SECOND);
    } else {
      enc->start_ts = 0;
      enc->granulepos_offset = 0;
    }
  }

  /* Check if we have a continuous stream; if not, drop some samples or the
   * whole buffer, or insert some silence samples */
  if (enc->next_ts != GST_CLOCK_TIME_NONE &&
      GST_BUFFER_TIMESTAMP (buf) < enc->next_ts) {
    guint64 diff = enc->next_ts - GST_BUFFER_TIMESTAMP (buf);
    guint64 diff_bytes;

    GST_WARNING_OBJECT (enc, "Buffer is older than previous "
        "timestamp + duration (%" GST_TIME_FORMAT "< %" GST_TIME_FORMAT
        "), cannot handle. Clipping buffer.",
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
        GST_TIME_ARGS (enc->next_ts));

    diff_bytes = GST_CLOCK_TIME_TO_FRAMES (diff, enc->rate) * enc->channels * 2;
    if (diff_bytes >= GST_BUFFER_SIZE (buf)) {
      gst_buffer_unref (buf);
      return GST_FLOW_OK;
    }
    buf = gst_buffer_make_metadata_writable (buf);
    GST_BUFFER_DATA (buf) += diff_bytes;
    GST_BUFFER_SIZE (buf) -= diff_bytes;

    GST_BUFFER_TIMESTAMP (buf) += diff;
    if (GST_BUFFER_DURATION_IS_VALID (buf))
      GST_BUFFER_DURATION (buf) -= diff;
  }

  if (enc->next_ts != GST_CLOCK_TIME_NONE
      && GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
    guint64 max_diff =
        gst_util_uint64_scale (enc->frame_size, GST_SECOND, enc->rate);

    if (GST_BUFFER_TIMESTAMP (buf) != enc->next_ts &&
        GST_BUFFER_TIMESTAMP (buf) - enc->next_ts > max_diff) {
      GST_WARNING_OBJECT (enc,
          "Discontinuity detected: %" G_GUINT64_FORMAT " > %" G_GUINT64_FORMAT,
          GST_BUFFER_TIMESTAMP (buf) - enc->next_ts, max_diff);

      gst_speex_enc_encode (enc, TRUE);

      enc->frameno_out = 0;
      enc->start_ts = GST_BUFFER_TIMESTAMP (buf);
      enc->granulepos_offset = gst_util_uint64_scale
          (GST_BUFFER_TIMESTAMP (buf), enc->rate, GST_SECOND);
    }
  }

  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)
      && GST_BUFFER_DURATION_IS_VALID (buf))
    enc->next_ts = GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf);
  else
    enc->next_ts = GST_CLOCK_TIME_NONE;

  GST_DEBUG_OBJECT (enc, "received buffer of %u bytes", GST_BUFFER_SIZE (buf));

  /* push buffer to adapter */
  gst_adapter_push (enc->adapter, buf);
  buf = NULL;

  ret = gst_speex_enc_encode (enc, FALSE);

done:

  if (buf)
    gst_buffer_unref (buf);

  return ret;

  /* ERRORS */
not_setup:
  {
    GST_ELEMENT_ERROR (enc, CORE, NEGOTIATION, (NULL),
        ("encoder not initialized (input is not audio?)"));
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }

}
Example no. 8
static GstFlowReturn
//gst_spectrum_transform_ip (GstBaseTransform * trans, GstBuffer * in)
gst_spectrum_transform_ip (GstSpectrum * trans, GstBuffer * in)
{
  GstSpectrum *spectrum = trans;
  gint wanted;
  gint i;
  gfloat *spect_magnitude = spectrum->spect_magnitude;
  gfloat *spect_phase = spectrum->spect_phase;
  gint rate = /*GST_AUDIO_FILTER (spectrum)->format.rate*/1;
  gint channels = /*GST_AUDIO_FILTER (spectrum)->format.channels*/1;
  gint width = /*GST_AUDIO_FILTER (spectrum)->format.width*/32 / 8;
  gint nfft = 2 * spectrum->bands - 2;

  /*GstClockTime endtime =
      gst_segment_to_running_time (&trans->segment, GST_FORMAT_TIME,
      GST_BUFFER_TIMESTAMP (in));
  GstClockTime blktime = GST_FRAMES_TO_CLOCK_TIME (nfft, rate);*/

  GST_LOG_OBJECT (spectrum, "input size: %d bytes", GST_BUFFER_SIZE (in));

  /* can we do this nicer? */
  //gst_adapter_push (spectrum->adapter, gst_buffer_copy (in));
  /* required number of bytes */
  wanted = channels * nfft * width;

  while (gst_adapter_available (spectrum->adapter) >= wanted) {
    const guint8 *samples;

    samples = gst_adapter_peek (spectrum->adapter, wanted);

    spectrum->process (spectrum, samples);

    spectrum->num_frames += nfft;
    /*endtime += blktime;*/
    /* do we need to message ? */
    if (spectrum->num_frames >=
        GST_CLOCK_TIME_TO_FRAMES (spectrum->interval, rate)) {
      if (spectrum->message) {
        GstMessage *m;

        /* Calculate average */
        for (i = 0; i < spectrum->bands; i++) {
          spect_magnitude[i] /= spectrum->num_fft;
          spect_phase[i] /= spectrum->num_fft;
        }

        //m = gst_spectrum_message_new (spectrum, endtime);

        //gst_element_post_message (GST_ELEMENT (spectrum), m);
      }
      memset (spect_magnitude, 0, spectrum->bands * sizeof (gfloat));
      memset (spect_phase, 0, spectrum->bands * sizeof (gfloat));
      spectrum->num_frames = 0;
      spectrum->num_fft = 0;
    }

    gst_adapter_flush (spectrum->adapter, wanted);
  }

  return GST_FLOW_OK;
}
Example no. 9
/**
 * gst_audio_info_convert:
 * @info: a #GstAudioInfo
 * @src_fmt: #GstFormat of the @src_val
 * @src_val: value to convert
 * @dest_fmt: #GstFormat of the @dest_val
 * @dest_val: pointer to destination value
 *
 * Converts among various #GstFormat types.  This function handles
 * GST_FORMAT_BYTES, GST_FORMAT_TIME, and GST_FORMAT_DEFAULT.  For
 * raw audio, GST_FORMAT_DEFAULT corresponds to audio frames.  This
 * function can be used to handle pad queries of the type GST_QUERY_CONVERT.
 *
 * Returns: TRUE if the conversion was successful.
 */
gboolean
gst_audio_info_convert (const GstAudioInfo * info,
    GstFormat src_fmt, gint64 src_val, GstFormat dest_fmt, gint64 * dest_val)
{
  gboolean res = TRUE;
  gint bpf, rate;

  GST_DEBUG ("converting value %" G_GINT64_FORMAT " from %s (%d) to %s (%d)",
      src_val, gst_format_get_name (src_fmt), src_fmt,
      gst_format_get_name (dest_fmt), dest_fmt);

  if (src_fmt == dest_fmt || src_val == -1) {
    *dest_val = src_val;
    goto done;
  }

  /* get important info */
  bpf = GST_AUDIO_INFO_BPF (info);
  rate = GST_AUDIO_INFO_RATE (info);

  if (bpf == 0 || rate == 0) {
    GST_DEBUG ("no rate or bpf configured");
    res = FALSE;
    goto done;
  }

  switch (src_fmt) {
    case GST_FORMAT_BYTES:
      switch (dest_fmt) {
        case GST_FORMAT_TIME:
          *dest_val = GST_FRAMES_TO_CLOCK_TIME (src_val / bpf, rate);
          break;
        case GST_FORMAT_DEFAULT:
          *dest_val = src_val / bpf;
          break;
        default:
          res = FALSE;
          break;
      }
      break;
    case GST_FORMAT_DEFAULT:
      switch (dest_fmt) {
        case GST_FORMAT_TIME:
          *dest_val = GST_FRAMES_TO_CLOCK_TIME (src_val, rate);
          break;
        case GST_FORMAT_BYTES:
          *dest_val = src_val * bpf;
          break;
        default:
          res = FALSE;
          break;
      }
      break;
    case GST_FORMAT_TIME:
      switch (dest_fmt) {
        case GST_FORMAT_DEFAULT:
          *dest_val = GST_CLOCK_TIME_TO_FRAMES (src_val, rate);
          break;
        case GST_FORMAT_BYTES:
          *dest_val = GST_CLOCK_TIME_TO_FRAMES (src_val, rate);
          *dest_val *= bpf;
          break;
        default:
          res = FALSE;
          break;
      }
      break;
    default:
      res = FALSE;
      break;
  }
done:

  GST_DEBUG ("ret=%d result %" G_GINT64_FORMAT, res, res ? *dest_val : -1);

  return res;
}
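As the documentation above notes, this function maps directly onto GST_QUERY_CONVERT handling. A minimal sketch of such a pad query handler follows; MyElement, its info field, and my_element_src_query are hypothetical names standing in for any element that keeps a configured GstAudioInfo, not code taken from the examples above:

#include <gst/gst.h>
#include <gst/audio/audio.h>

/* hypothetical element holding a configured GstAudioInfo */
typedef struct
{
  GstElement parent;
  GstAudioInfo info;
} MyElement;

static gboolean
my_element_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
  MyElement *self = (MyElement *) parent;

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_CONVERT:{
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      /* let gst_audio_info_convert() do the bytes/time/frames math */
      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt,
          &dest_val);
      if (!gst_audio_info_convert (&self->info, src_fmt, src_val,
              dest_fmt, &dest_val))
        return FALSE;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      return TRUE;
    }
    default:
      return gst_pad_query_default (pad, parent, query);
  }
}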
Example no. 10
static gboolean
gst_level_set_caps (GstBaseTransform * trans, GstCaps * in, GstCaps * out)
{
  GstLevel *filter = GST_LEVEL (trans);
  GstAudioInfo info;
  gint i, channels, rate;

  if (!gst_audio_info_from_caps (&info, in))
    return FALSE;

  switch (GST_AUDIO_INFO_FORMAT (&info)) {
    case GST_AUDIO_FORMAT_S8:
      filter->process = gst_level_calculate_gint8;
      break;
    case GST_AUDIO_FORMAT_S16:
      filter->process = gst_level_calculate_gint16;
      break;
    case GST_AUDIO_FORMAT_S32:
      filter->process = gst_level_calculate_gint32;
      break;
    case GST_AUDIO_FORMAT_F32:
      filter->process = gst_level_calculate_gfloat;
      break;
    case GST_AUDIO_FORMAT_F64:
      filter->process = gst_level_calculate_gdouble;
      break;
    default:
      filter->process = NULL;
      break;
  }

  filter->info = info;

  channels = GST_AUDIO_INFO_CHANNELS (&info);
  rate = GST_AUDIO_INFO_RATE (&info);

  /* allocate channel variable arrays */
  g_free (filter->CS);
  g_free (filter->peak);
  g_free (filter->last_peak);
  g_free (filter->decay_peak);
  g_free (filter->decay_peak_base);
  g_free (filter->decay_peak_age);
  filter->CS = g_new (gdouble, channels);
  filter->peak = g_new (gdouble, channels);
  filter->last_peak = g_new (gdouble, channels);
  filter->decay_peak = g_new (gdouble, channels);
  filter->decay_peak_base = g_new (gdouble, channels);

  filter->decay_peak_age = g_new (GstClockTime, channels);

  for (i = 0; i < channels; ++i) {
    filter->CS[i] = filter->peak[i] = filter->last_peak[i] =
        filter->decay_peak[i] = filter->decay_peak_base[i] = 0.0;
    filter->decay_peak_age[i] = G_GUINT64_CONSTANT (0);
  }

  filter->interval_frames = GST_CLOCK_TIME_TO_FRAMES (filter->interval, rate);

  return TRUE;
}
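Note that this revision of gst_level computes interval_frames directly, without the zero-interval guard shown in Example no. 5 (gst_level_recalc_interval_frames): an interval shorter than one frame period at the configured rate would yield interval_frames == 0 here.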