Code Example #1
File: gstrtpL16depay.c Project: PeterXu/gst-mobile
static GstBuffer *
gst_rtp_L16_depay_process (GstRTPBaseDepayload * depayload, GstBuffer * buf)
{
  GstRtpL16Depay *rtpL16depay;
  GstBuffer *outbuf;
  gint payload_len;
  gboolean marker;
  GstRTPBuffer rtp = { NULL };

  rtpL16depay = GST_RTP_L16_DEPAY (depayload);

  gst_rtp_buffer_map (buf, GST_MAP_READ, &rtp);
  payload_len = gst_rtp_buffer_get_payload_len (&rtp);

  if (payload_len <= 0)
    goto empty_packet;

  GST_DEBUG_OBJECT (rtpL16depay, "got payload of %d bytes", payload_len);

  outbuf = gst_rtp_buffer_get_payload_buffer (&rtp);
  marker = gst_rtp_buffer_get_marker (&rtp);

  if (marker) {
    /* mark talk spurt with DISCONT */
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
  }

  outbuf = gst_buffer_make_writable (outbuf);
  if (rtpL16depay->order &&
      !gst_audio_buffer_reorder_channels (outbuf,
          rtpL16depay->info.finfo->format, rtpL16depay->info.channels,
          rtpL16depay->info.position, rtpL16depay->order->pos)) {
    goto reorder_failed;
  }

  gst_rtp_buffer_unmap (&rtp);

  return outbuf;

  /* ERRORS */
empty_packet:
  {
    GST_ELEMENT_WARNING (rtpL16depay, STREAM, DECODE,
        ("Empty Payload."), (NULL));
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
reorder_failed:
  {
    GST_ELEMENT_ERROR (rtpL16depay, STREAM, DECODE,
        ("Channel reordering failed."), (NULL));
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
}
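
All nine examples on this page call gst_audio_buffer_reorder_channels() from the gst-plugins-base audio library. Below is a minimal, self-contained sketch of that call in isolation; it is not taken from any of the projects above, and the stereo source order it assumes is hypothetical.

#include <gst/audio/audio.h>

/* Sketch: reorder a writable S16 stereo buffer from a hypothetical
 * right/left source order into GStreamer's canonical left/right order.
 * The reorder happens in place and returns FALSE if the two position
 * sets cannot be matched. */
static gboolean
reorder_stereo_to_canonical (GstBuffer ** buffer)
{
  GstAudioChannelPosition from_pos[2] = {
    GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT,
    GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT
  };
  GstAudioChannelPosition to_pos[2] = {
    GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT,
    GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT
  };

  /* in-place reordering requires a writable buffer */
  *buffer = gst_buffer_make_writable (*buffer);

  return gst_audio_buffer_reorder_channels (*buffer, GST_AUDIO_FORMAT_S16,
      2, from_pos, to_pos);
}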
Code Example #2
static gboolean
gst_raw_audio_parse_process (GstRawBaseParse * raw_base_parse,
    GstRawBaseParseConfig config, GstBuffer * in_data, gsize total_num_in_bytes,
    gsize num_valid_in_bytes, GstBuffer ** processed_data)
{
  GstRawAudioParse *raw_audio_parse = GST_RAW_AUDIO_PARSE (raw_base_parse);
  GstRawAudioParseConfig *config_ptr =
      gst_raw_audio_parse_get_config_ptr (raw_audio_parse, config);

  if ((config_ptr->format == GST_RAW_AUDIO_PARSE_FORMAT_PCM)
      && config_ptr->needs_channel_reordering) {
    /* Need to reorder samples, since they are in an invalid
     * channel order. */

    GstBuffer *outbuf;

    GST_LOG_OBJECT (raw_audio_parse,
        "using %" G_GSIZE_FORMAT " bytes out of the %" G_GSIZE_FORMAT
        " bytes from the input buffer with reordering", num_valid_in_bytes,
        total_num_in_bytes);

    outbuf =
        gst_buffer_copy_region (in_data,
        GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
        GST_BUFFER_COPY_META | GST_BUFFER_COPY_MEMORY, 0, num_valid_in_bytes);

    gst_audio_buffer_reorder_channels (outbuf,
        config_ptr->pcm_format,
        config_ptr->num_channels,
        config_ptr->channel_positions, config_ptr->reordered_channel_positions);

    *processed_data = outbuf;
  } else {
    /* Nothing needs to be done with the sample data.
     * Instruct the baseparse class to just take out_size bytes
     * from the input buffer */

    GST_LOG_OBJECT (raw_audio_parse,
        "using %" G_GSIZE_FORMAT " bytes out of the %" G_GSIZE_FORMAT
        " bytes from the input buffer without reordering", num_valid_in_bytes,
        total_num_in_bytes);

    *processed_data = NULL;
  }

  return TRUE;
}
Code Example #3
static GstFlowReturn
gst_rtp_L16_pay_handle_buffer (GstRTPBasePayload * basepayload,
    GstBuffer * buffer)
{
  GstRtpL16Pay *rtpL16pay;

  rtpL16pay = GST_RTP_L16_PAY (basepayload);
  buffer = gst_buffer_make_writable (buffer);

  if (rtpL16pay->order &&
      !gst_audio_buffer_reorder_channels (buffer, rtpL16pay->info.finfo->format,
          rtpL16pay->info.channels, rtpL16pay->info.position,
          rtpL16pay->order->pos)) {
    return GST_FLOW_ERROR;
  }

  return GST_RTP_BASE_PAYLOAD_CLASS (parent_class)->handle_buffer (basepayload,
      buffer);
}
Code Example #4
static GstFlowReturn
opus_dec_chain_parse_data (GstOpusDec * dec, GstBuffer * buffer)
{
  GstFlowReturn res = GST_FLOW_OK;
  gsize size;
  guint8 *data;
  GstBuffer *outbuf;
  gint16 *out_data;
  int n, err;
  int samples;
  unsigned int packet_size;
  GstBuffer *buf;
  GstMapInfo map, omap;

  if (dec->state == NULL) {
    /* If we did not get any headers, default to 2 channels */
    if (dec->n_channels == 0) {
      GST_INFO_OBJECT (dec, "No header, assuming single stream");
      dec->n_channels = 2;
      dec->sample_rate = 48000;
      /* default stereo mapping */
      dec->channel_mapping_family = 0;
      dec->channel_mapping[0] = 0;
      dec->channel_mapping[1] = 1;
      dec->n_streams = 1;
      dec->n_stereo_streams = 1;

      gst_opus_dec_negotiate (dec, NULL);
    }

    GST_DEBUG_OBJECT (dec, "Creating decoder with %d channels, %d Hz",
        dec->n_channels, dec->sample_rate);
#ifndef GST_DISABLE_GST_DEBUG
    gst_opus_common_log_channel_mapping_table (GST_ELEMENT (dec), opusdec_debug,
        "Mapping table", dec->n_channels, dec->channel_mapping);
#endif

    GST_DEBUG_OBJECT (dec, "%d streams, %d stereo", dec->n_streams,
        dec->n_stereo_streams);
    dec->state =
        opus_multistream_decoder_create (dec->sample_rate, dec->n_channels,
        dec->n_streams, dec->n_stereo_streams, dec->channel_mapping, &err);
    if (!dec->state || err != OPUS_OK)
      goto creation_failed;
  }

  if (buffer) {
    GST_DEBUG_OBJECT (dec, "Received buffer of size %" G_GSIZE_FORMAT,
        gst_buffer_get_size (buffer));
  } else {
    GST_DEBUG_OBJECT (dec, "Received missing buffer");
  }

  /* if using in-band FEC, we introduce one extra frame's delay, as we may
     need to wait for the next buffer to decode a missing buffer */
  if (dec->use_inband_fec && !dec->primed) {
    GST_DEBUG_OBJECT (dec, "First buffer received in FEC mode, early out");
    gst_buffer_replace (&dec->last_buffer, buffer);
    dec->primed = TRUE;
    goto done;
  }

  /* That's the buffer we'll be sending to the opus decoder. */
  buf = (dec->use_inband_fec
      && gst_buffer_get_size (dec->last_buffer) >
      0) ? dec->last_buffer : buffer;

  if (buf && gst_buffer_get_size (buf) > 0) {
    gst_buffer_map (buf, &map, GST_MAP_READ);
    data = map.data;
    size = map.size;
    GST_DEBUG_OBJECT (dec, "Using buffer of size %" G_GSIZE_FORMAT, size);
  } else {
    /* concealment data, pass NULL as the bits parameters */
    GST_DEBUG_OBJECT (dec, "Using NULL buffer");
    data = NULL;
    size = 0;
  }

  /* use maximum size (120 ms) as the number of returned samples is
     not constant over the stream. */
  samples = 120 * dec->sample_rate / 1000;
  packet_size = samples * dec->n_channels * 2;

  outbuf =
      gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (dec),
      packet_size);
  if (!outbuf) {
    goto buffer_failed;
  }

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_data = (gint16 *) omap.data;

  if (dec->use_inband_fec) {
    if (dec->last_buffer) {
      /* normal delayed decode */
      GST_LOG_OBJECT (dec, "FEC enabled, decoding last delayed buffer");
      n = opus_multistream_decode (dec->state, data, size, out_data, samples,
          0);
    } else {
      /* FEC reconstruction decode */
      GST_LOG_OBJECT (dec, "FEC enabled, reconstructing last buffer");
      n = opus_multistream_decode (dec->state, data, size, out_data, samples,
          1);
    }
  } else {
    /* normal decode */
    GST_LOG_OBJECT (dec, "FEC disabled, decoding buffer");
    n = opus_multistream_decode (dec->state, data, size, out_data, samples, 0);
  }
  gst_buffer_unmap (outbuf, &omap);
  if (data != NULL)
    gst_buffer_unmap (buf, &map);

  if (n < 0) {
    GST_ELEMENT_ERROR (dec, STREAM, DECODE, ("Decoding error: %d", n), (NULL));
    gst_buffer_unref (outbuf);
    return GST_FLOW_ERROR;
  }
  GST_DEBUG_OBJECT (dec, "decoded %d samples", n);
  gst_buffer_set_size (outbuf, n * 2 * dec->n_channels);

  /* Skip any samples that need skipping */
  if (dec->pre_skip > 0) {
    guint scaled_pre_skip = dec->pre_skip * dec->sample_rate / 48000;
    guint skip = scaled_pre_skip > n ? n : scaled_pre_skip;
    guint scaled_skip = skip * 48000 / dec->sample_rate;

    gst_buffer_resize (outbuf, skip * 2 * dec->n_channels, -1);
    dec->pre_skip -= scaled_skip;
    GST_INFO_OBJECT (dec,
        "Skipping %u samples (%u at 48000 Hz, %u left to skip)", skip,
        scaled_skip, dec->pre_skip);
  }

  if (gst_buffer_get_size (outbuf) == 0) {
    gst_buffer_unref (outbuf);
    outbuf = NULL;
  } else if (dec->opus_pos[0] != GST_AUDIO_CHANNEL_POSITION_INVALID) {
    gst_audio_buffer_reorder_channels (outbuf, GST_AUDIO_FORMAT_S16,
        dec->n_channels, dec->opus_pos, dec->info.position);
  }

  /* Apply gain */
  /* Would be better off leaving this to a volume element, as this is
     a naive conversion that does too many int/float conversions.
     However, we don't have control over the pipeline...
     So make it optional if the user program wants to use a volume,
     but do it by default so the correct volume goes out by default */
  if (dec->apply_gain && outbuf && dec->r128_gain) {
    gsize rsize;
    unsigned int i, nsamples;
    double volume = dec->r128_gain_volume;
    gint16 *samples;

    gst_buffer_map (outbuf, &omap, GST_MAP_READWRITE);
    samples = (gint16 *) omap.data;
    rsize = omap.size;
    GST_DEBUG_OBJECT (dec, "Applying gain: volume %f", volume);
    nsamples = rsize / 2;
    for (i = 0; i < nsamples; ++i) {
      int sample = (int) (samples[i] * volume + 0.5);
      samples[i] = sample < -32768 ? -32768 : sample > 32767 ? 32767 : sample;
    }
    gst_buffer_unmap (outbuf, &omap);
  }

  if (dec->use_inband_fec) {
    gst_buffer_replace (&dec->last_buffer, buffer);
  }

  res = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (dec), outbuf, 1);

  if (res != GST_FLOW_OK)
    GST_DEBUG_OBJECT (dec, "flow: %s", gst_flow_get_name (res));

done:
  return res;

creation_failed:
  GST_ERROR_OBJECT (dec, "Failed to create Opus decoder: %d", err);
  return GST_FLOW_ERROR;

buffer_failed:
  GST_ERROR_OBJECT (dec, "Failed to create %u byte buffer", packet_size);
  return GST_FLOW_ERROR;
}
Code Example #5
static GstFlowReturn
opus_dec_chain_parse_data (GstOpusDec * dec, GstBuffer * buffer)
{
  GstFlowReturn res = GST_FLOW_OK;
  gsize size;
  guint8 *data;
  GstBuffer *outbuf, *bufd;
  gint16 *out_data;
  int n, err;
  int samples;
  unsigned int packet_size;
  GstBuffer *buf;
  GstMapInfo map, omap;
  GstAudioClippingMeta *cmeta = NULL;

  if (dec->state == NULL) {
    /* If we did not get any headers, default to 2 channels */
    if (dec->n_channels == 0) {
      GST_INFO_OBJECT (dec, "No header, assuming single stream");
      dec->n_channels = 2;
      dec->sample_rate = 48000;
      /* default stereo mapping */
      dec->channel_mapping_family = 0;
      dec->channel_mapping[0] = 0;
      dec->channel_mapping[1] = 1;
      dec->n_streams = 1;
      dec->n_stereo_streams = 1;

      if (!gst_opus_dec_negotiate (dec, NULL))
        return GST_FLOW_NOT_NEGOTIATED;
    }

    if (dec->n_channels == 2 && dec->n_streams == 1
        && dec->n_stereo_streams == 0) {
      /* if we are automatically decoding 2 channels, but only have
         a single encoded one, direct both channels to it */
      dec->channel_mapping[1] = 0;
    }

    GST_DEBUG_OBJECT (dec, "Creating decoder with %d channels, %d Hz",
        dec->n_channels, dec->sample_rate);
#ifndef GST_DISABLE_GST_DEBUG
    gst_opus_common_log_channel_mapping_table (GST_ELEMENT (dec), opusdec_debug,
        "Mapping table", dec->n_channels, dec->channel_mapping);
#endif

    GST_DEBUG_OBJECT (dec, "%d streams, %d stereo", dec->n_streams,
        dec->n_stereo_streams);
    dec->state =
        opus_multistream_decoder_create (dec->sample_rate, dec->n_channels,
        dec->n_streams, dec->n_stereo_streams, dec->channel_mapping, &err);
    if (!dec->state || err != OPUS_OK)
      goto creation_failed;
  }

  if (buffer) {
    GST_DEBUG_OBJECT (dec, "Received buffer of size %" G_GSIZE_FORMAT,
        gst_buffer_get_size (buffer));
  } else {
    GST_DEBUG_OBJECT (dec, "Received missing buffer");
  }

  /* if using in-band FEC, we introduce one extra frame's delay, as we may
     need to wait for the next buffer to decode a missing buffer */
  if (dec->use_inband_fec && !dec->primed) {
    GST_DEBUG_OBJECT (dec, "First buffer received in FEC mode, early out");
    gst_buffer_replace (&dec->last_buffer, buffer);
    dec->primed = TRUE;
    goto done;
  }

  /* That's the buffer we'll be sending to the opus decoder. */
  buf = (dec->use_inband_fec
      && gst_buffer_get_size (dec->last_buffer) >
      0) ? dec->last_buffer : buffer;

  /* That's the buffer we get duration from */
  bufd = dec->use_inband_fec ? dec->last_buffer : buffer;

  if (buf && gst_buffer_get_size (buf) > 0) {
    gst_buffer_map (buf, &map, GST_MAP_READ);
    data = map.data;
    size = map.size;
    GST_DEBUG_OBJECT (dec, "Using buffer of size %" G_GSIZE_FORMAT, size);
  } else {
    /* concealment data, pass NULL as the bits parameters */
    GST_DEBUG_OBJECT (dec, "Using NULL buffer");
    data = NULL;
    size = 0;
  }

  if (gst_buffer_get_size (bufd) == 0) {
    GstClockTime const opus_plc_alignment = 2500 * GST_USECOND;
    GstClockTime aligned_missing_duration;
    GstClockTime missing_duration = GST_BUFFER_DURATION (bufd);

    if (!GST_CLOCK_TIME_IS_VALID (missing_duration) || missing_duration == 0) {
      if (GST_CLOCK_TIME_IS_VALID (dec->last_known_buffer_duration)) {
        missing_duration = dec->last_known_buffer_duration;
        GST_WARNING_OBJECT (dec,
            "Missing duration, using last duration %" GST_TIME_FORMAT,
            GST_TIME_ARGS (missing_duration));
      } else {
        GST_WARNING_OBJECT (dec,
            "Missing buffer, but unknown duration, and no previously known duration, assuming 20 ms");
        missing_duration = 20 * GST_MSECOND;
      }
    }

    GST_DEBUG_OBJECT (dec,
        "missing buffer, doing PLC duration %" GST_TIME_FORMAT
        " plus leftover %" GST_TIME_FORMAT, GST_TIME_ARGS (missing_duration),
        GST_TIME_ARGS (dec->leftover_plc_duration));

    /* add the leftover PLC duration to that of the buffer */
    missing_duration += dec->leftover_plc_duration;

    /* align the combined buffer and leftover PLC duration to multiples
     * of 2.5ms, rounding to nearest, and store excess duration for later */
    aligned_missing_duration =
        ((missing_duration +
            opus_plc_alignment / 2) / opus_plc_alignment) * opus_plc_alignment;
    dec->leftover_plc_duration = missing_duration - aligned_missing_duration;

    /* Opus' PLC cannot operate with less than 2.5ms; skip PLC
     * and accumulate the missing duration in the leftover_plc_duration
     * for the next PLC attempt */
    if (aligned_missing_duration < opus_plc_alignment) {
      GST_DEBUG_OBJECT (dec,
          "current duration %" GST_TIME_FORMAT
          " of missing data not enough for PLC (minimum needed: %"
          GST_TIME_FORMAT ") - skipping", GST_TIME_ARGS (missing_duration),
          GST_TIME_ARGS (opus_plc_alignment));
      goto done;
    }

    /* convert the duration (in nanoseconds) to sample count */
    samples =
        gst_util_uint64_scale_int (aligned_missing_duration, dec->sample_rate,
        GST_SECOND);

    GST_DEBUG_OBJECT (dec,
        "calculated PLC frame length: %" GST_TIME_FORMAT
        " num frame samples: %d new leftover: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (aligned_missing_duration), samples,
        GST_TIME_ARGS (dec->leftover_plc_duration));
  } else {
    /* use maximum size (120 ms) as the number of returned samples is
       not constant over the stream. */
    samples = 120 * dec->sample_rate / 1000;
  }
  packet_size = samples * dec->n_channels * 2;

  outbuf =
      gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (dec),
      packet_size);
  if (!outbuf) {
    goto buffer_failed;
  }

  if (size > 0)
    dec->last_known_buffer_duration = packet_duration_opus (data, size);

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_data = (gint16 *) omap.data;

  do {
    if (dec->use_inband_fec) {
      if (gst_buffer_get_size (dec->last_buffer) > 0) {
        /* normal delayed decode */
        GST_LOG_OBJECT (dec, "FEC enabled, decoding last delayed buffer");
        n = opus_multistream_decode (dec->state, data, size, out_data, samples,
            0);
      } else {
        /* FEC reconstruction decode */
        GST_LOG_OBJECT (dec, "FEC enabled, reconstructing last buffer");
        n = opus_multistream_decode (dec->state, data, size, out_data, samples,
            1);
      }
    } else {
      /* normal decode */
      GST_LOG_OBJECT (dec, "FEC disabled, decoding buffer");
      n = opus_multistream_decode (dec->state, data, size, out_data, samples,
          0);
    }
    if (n == OPUS_BUFFER_TOO_SMALL) {
      /* if too small, add 2.5 milliseconds and try again, up to the
       * Opus max size of 120 milliseconds */
      if (samples >= 120 * dec->sample_rate / 1000)
        break;
      samples += 25 * dec->sample_rate / 10000;
      packet_size = samples * dec->n_channels * 2;
      gst_buffer_unmap (outbuf, &omap);
      gst_buffer_unref (outbuf);
      outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (dec),
          packet_size);
      if (!outbuf) {
        goto buffer_failed;
      }
      gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
      out_data = (gint16 *) omap.data;
    }
  } while (n == OPUS_BUFFER_TOO_SMALL);
  gst_buffer_unmap (outbuf, &omap);
  if (data != NULL)
    gst_buffer_unmap (buf, &map);

  if (n < 0) {
    GstFlowReturn ret = GST_FLOW_ERROR;

    gst_buffer_unref (outbuf);
    GST_AUDIO_DECODER_ERROR (dec, 1, STREAM, DECODE, (NULL),
        ("Decoding error (%d): %s", n, opus_strerror (n)), ret);
    return ret;
  }
  GST_DEBUG_OBJECT (dec, "decoded %d samples", n);
  gst_buffer_set_size (outbuf, n * 2 * dec->n_channels);
  GST_BUFFER_DURATION (outbuf) = samples * GST_SECOND / dec->sample_rate;
  samples = n;

  cmeta = gst_buffer_get_audio_clipping_meta (buf);

  g_assert (!cmeta || cmeta->format == GST_FORMAT_DEFAULT);

  /* Skip any samples that need skipping */
  if (cmeta && cmeta->start) {
    guint pre_skip = cmeta->start;
    guint scaled_pre_skip = pre_skip * dec->sample_rate / 48000;
    guint skip = scaled_pre_skip > n ? n : scaled_pre_skip;
    guint scaled_skip = skip * 48000 / dec->sample_rate;

    gst_buffer_resize (outbuf, skip * 2 * dec->n_channels, -1);

    GST_INFO_OBJECT (dec,
        "Skipping %u samples at the beginning (%u at 48000 Hz)",
        skip, scaled_skip);
  }

  if (cmeta && cmeta->end) {
    guint post_skip = cmeta->end;
    guint scaled_post_skip = post_skip * dec->sample_rate / 48000;
    guint skip = scaled_post_skip > n ? n : scaled_post_skip;
    guint scaled_skip = skip * 48000 / dec->sample_rate;
    guint outsize = gst_buffer_get_size (outbuf);
    guint skip_bytes = skip * 2 * dec->n_channels;

    if (outsize > skip_bytes)
      outsize -= skip_bytes;
    else
      outsize = 0;

    gst_buffer_resize (outbuf, 0, outsize);

    GST_INFO_OBJECT (dec,
        "Skipping %u samples at the end (%u at 48000 Hz)", skip, scaled_skip);
  }

  if (gst_buffer_get_size (outbuf) == 0) {
    gst_buffer_unref (outbuf);
    outbuf = NULL;
  } else if (dec->opus_pos[0] != GST_AUDIO_CHANNEL_POSITION_INVALID) {
    gst_audio_buffer_reorder_channels (outbuf, GST_AUDIO_FORMAT_S16,
        dec->n_channels, dec->opus_pos, dec->info.position);
  }

  /* Apply gain */
  /* Would be better off leaving this to a volume element, as this is
     a naive conversion that does too many int/float conversions.
     However, we don't have control over the pipeline...
     So make it optional if the user program wants to use a volume,
     but do it by default so the correct volume goes out by default */
  if (dec->apply_gain && outbuf && dec->r128_gain) {
    gsize rsize;
    unsigned int i, nsamples;
    double volume = dec->r128_gain_volume;
    gint16 *samples;

    gst_buffer_map (outbuf, &omap, GST_MAP_READWRITE);
    samples = (gint16 *) omap.data;
    rsize = omap.size;
    GST_DEBUG_OBJECT (dec, "Applying gain: volume %f", volume);
    nsamples = rsize / 2;
    for (i = 0; i < nsamples; ++i) {
      int sample = (int) (samples[i] * volume + 0.5);
      samples[i] = sample < -32768 ? -32768 : sample > 32767 ? 32767 : sample;
    }
    gst_buffer_unmap (outbuf, &omap);
  }

  if (dec->use_inband_fec) {
    gst_buffer_replace (&dec->last_buffer, buffer);
  }

  res = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (dec), outbuf, 1);

  if (res != GST_FLOW_OK)
    GST_DEBUG_OBJECT (dec, "flow: %s", gst_flow_get_name (res));

done:
  return res;

creation_failed:
  GST_ELEMENT_ERROR (dec, LIBRARY, INIT, ("Failed to create Opus decoder"),
      ("Failed to create Opus decoder (%d): %s", err, opus_strerror (err)));
  return GST_FLOW_ERROR;

buffer_failed:
  GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
      ("Failed to create %u byte buffer", packet_size));
  return GST_FLOW_ERROR;
}
Code Example #6
static GstBuffer *
gst_rtp_L16_depay_process (GstRTPBaseDepayload * depayload, GstRTPBuffer * rtp)
{
  GstRtpL16Depay *rtpL16depay;
  GstBuffer *outbuf;
  gint payload_len;
  gboolean marker;
  GstAudioInfo *info;

  rtpL16depay = GST_RTP_L16_DEPAY (depayload);

  payload_len = gst_rtp_buffer_get_payload_len (rtp);

  if (payload_len <= 0)
    goto empty_packet;

  GST_DEBUG_OBJECT (rtpL16depay, "got payload of %d bytes", payload_len);

  outbuf = gst_rtp_buffer_get_payload_buffer (rtp);
  marker = gst_rtp_buffer_get_marker (rtp);

  if (marker) {
    /* mark talk spurt with RESYNC */
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_RESYNC);
  }

  outbuf = gst_buffer_make_writable (outbuf);
  info = &rtpL16depay->info;

  if (payload_len % ((info->finfo->width * info->channels) / 8) != 0)
    goto wrong_payload_size;

  if (rtpL16depay->order &&
      !gst_audio_buffer_reorder_channels (outbuf,
          info->finfo->format, info->channels,
          info->position, rtpL16depay->order->pos)) {
    goto reorder_failed;
  }

  gst_rtp_drop_meta (GST_ELEMENT_CAST (rtpL16depay), outbuf,
      g_quark_from_static_string (GST_META_TAG_AUDIO_STR));

  return outbuf;

  /* ERRORS */
empty_packet:
  {
    GST_ELEMENT_WARNING (rtpL16depay, STREAM, DECODE,
        ("Empty Payload."), (NULL));
    return NULL;
  }
wrong_payload_size:
  {
    GST_ELEMENT_WARNING (rtpL16depay, STREAM, DECODE,
        ("Wrong Payload Size."), (NULL));
    return NULL;
  }
reorder_failed:
  {
    GST_ELEMENT_ERROR (rtpL16depay, STREAM, DECODE,
        ("Channel reordering failed."), (NULL));
    return NULL;
  }
}
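
Unlike the variant in Code Example #1, this version also rejects payloads that do not hold a whole number of audio frames. A hedged sketch of that size check as a hypothetical standalone helper:

#include <glib.h>

/* Hypothetical helper: an L16 payload is frame-aligned only if it contains
 * an integral number of frames, one frame being (width_bits * channels) / 8
 * bytes, e.g. 4 bytes for 16-bit stereo. */
static gboolean
payload_is_frame_aligned (gint payload_len, gint width_bits, gint channels)
{
  gint frame_size = (width_bits * channels) / 8;

  return frame_size > 0 && (payload_len % frame_size) == 0;
}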
Code Example #7
File: gstavauddec.c Project: cablelabs/gst-libav
static gint
gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec,
    AVCodec * in_plugin, guint8 * data, guint size,
    GstBuffer ** outbuf, GstFlowReturn * ret)
{
  gint len = -1;
  gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE;
  AVPacket packet;
  AVFrame frame;

  GST_DEBUG_OBJECT (ffmpegdec, "size: %d", size);

  gst_avpacket_init (&packet, data, size);
  memset (&frame, 0, sizeof (frame));
  avcodec_get_frame_defaults (&frame);
  len = avcodec_decode_audio4 (ffmpegdec->context, &frame, &have_data, &packet);

  GST_DEBUG_OBJECT (ffmpegdec,
      "Decode audio: len=%d, have_data=%d", len, have_data);

  if (len >= 0 && have_data > 0) {
    BufferInfo *buffer_info = frame.opaque;
    gint nsamples, channels, byte_per_sample;
    gsize output_size;

    if (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)) {
      *outbuf = NULL;
      *ret = GST_FLOW_NOT_NEGOTIATED;
      len = -1;
      goto beach;
    }

    channels = ffmpegdec->info.channels;
    nsamples = frame.nb_samples;
    byte_per_sample = ffmpegdec->info.finfo->width / 8;

    /* frame.linesize[0] might contain padding, allocate only what's needed */
    output_size = nsamples * byte_per_sample * channels;

    GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
    if (buffer_info) {
      *outbuf = buffer_info->buffer;
      gst_buffer_unmap (buffer_info->buffer, &buffer_info->map);
      g_slice_free (BufferInfo, buffer_info);
      frame.opaque = NULL;
    } else if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt)
        && channels > 1) {
      gint i, j;
      GstMapInfo minfo;

      /* note: linesize[0] might contain padding, allocate only what's needed */
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);

      gst_buffer_map (*outbuf, &minfo, GST_MAP_WRITE);

      switch (ffmpegdec->info.finfo->width) {
        case 8:{
          guint8 *odata = minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint8 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 16:{
          guint16 *odata = (guint16 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint16 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 32:{
          guint32 *odata = (guint32 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint32 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 64:{
          guint64 *odata = (guint64 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint64 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }
      gst_buffer_unmap (*outbuf, &minfo);
    } else {
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);
      gst_buffer_fill (*outbuf, 0, frame.data[0], output_size);
    }

    GST_DEBUG_OBJECT (ffmpegdec, "Buffer created. Size: %d", have_data);

    /* Reorder channels to the GStreamer channel order */
    if (ffmpegdec->needs_reorder) {
      *outbuf = gst_buffer_make_writable (*outbuf);
      gst_audio_buffer_reorder_channels (*outbuf, ffmpegdec->info.finfo->format,
          ffmpegdec->info.channels, ffmpegdec->ffmpeg_layout,
          ffmpegdec->info.position);
    }
  } else {
    *outbuf = NULL;
  }

beach:
  GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
      *ret, *outbuf, len);
  return len;
}
Code Example #8
File: gstavauddec.c Project: GStreamer/gst-libav
static gint
gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec,
    AVCodec * in_plugin, guint8 * data, guint size, gint * have_data,
    GstBuffer ** outbuf, GstFlowReturn * ret)
{
  gint len = -1;
  AVPacket packet;

  GST_DEBUG_OBJECT (ffmpegdec, "size: %d", size);

  gst_avpacket_init (&packet, data, size);
  len =
      avcodec_decode_audio4 (ffmpegdec->context, ffmpegdec->frame, have_data,
      &packet);

  GST_DEBUG_OBJECT (ffmpegdec,
      "Decode audio: len=%d, have_data=%d", len, *have_data);

  if (len >= 0 && *have_data) {
    gint nsamples, channels, byte_per_sample;
    gsize output_size;

    if (!gst_ffmpegauddec_negotiate (ffmpegdec, ffmpegdec->context,
            ffmpegdec->frame, FALSE)) {
      *outbuf = NULL;
      *ret = GST_FLOW_NOT_NEGOTIATED;
      len = -1;
      goto beach;
    }

    channels = ffmpegdec->info.channels;
    nsamples = ffmpegdec->frame->nb_samples;
    byte_per_sample = ffmpegdec->info.finfo->width / 8;

    /* ffmpegdec->frame->linesize[0] might contain padding, allocate only what's needed */
    output_size = nsamples * byte_per_sample * channels;

    GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
    if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt)
        && channels > 1) {
      gint i, j;
      GstMapInfo minfo;

      /* note: linesize[0] might contain padding, allocate only what's needed */
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);

      gst_buffer_map (*outbuf, &minfo, GST_MAP_WRITE);

      switch (ffmpegdec->info.finfo->width) {
        case 8:{
          guint8 *odata = minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] =
                  ((const guint8 *) ffmpegdec->frame->extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 16:{
          guint16 *odata = (guint16 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] =
                  ((const guint16 *) ffmpegdec->frame->extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 32:{
          guint32 *odata = (guint32 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] =
                  ((const guint32 *) ffmpegdec->frame->extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 64:{
          guint64 *odata = (guint64 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] =
                  ((const guint64 *) ffmpegdec->frame->extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }
      gst_buffer_unmap (*outbuf, &minfo);
    } else {
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);
      gst_buffer_fill (*outbuf, 0, ffmpegdec->frame->data[0], output_size);
    }

    GST_DEBUG_OBJECT (ffmpegdec, "Buffer created. Size: %" G_GSIZE_FORMAT,
        output_size);

    /* Reorder channels to the GStreamer channel order */
    if (ffmpegdec->needs_reorder) {
      *outbuf = gst_buffer_make_writable (*outbuf);
      gst_audio_buffer_reorder_channels (*outbuf, ffmpegdec->info.finfo->format,
          ffmpegdec->info.channels, ffmpegdec->ffmpeg_layout,
          ffmpegdec->info.position);
    }

    /* Mark corrupted frames as corrupted */
    if (ffmpegdec->frame->flags & AV_FRAME_FLAG_CORRUPT)
      GST_BUFFER_FLAG_SET (*outbuf, GST_BUFFER_FLAG_CORRUPTED);
  } else {
    *outbuf = NULL;
  }

beach:
  av_frame_unref (ffmpegdec->frame);
  GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
      *ret, *outbuf, len);
  return len;
}
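
Both gst-libav variants (Code Examples #7 and #8) interleave planar samples with a per-sample-width switch before reordering channels. A minimal sketch of the underlying pattern for the 16-bit case, written as a hypothetical helper rather than code from either project:

#include <glib.h>

/* Hypothetical helper: interleave planar 16-bit samples (one plane per
 * channel, as in AVFrame::extended_data) into a single contiguous block. */
static void
interleave_s16 (gint16 * dst, const gint16 * const *planes,
    gint nsamples, gint channels)
{
  gint i, j;

  for (i = 0; i < nsamples; i++)
    for (j = 0; j < channels; j++)
      *dst++ = planes[j][i];
}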
Code Example #9
static GstFlowReturn gst_imx_audio_uniaudio_dec_handle_frame(GstAudioDecoder *dec, GstBuffer *buffer)
{
	GstMapInfo in_map;
	GstBuffer *out_buffer;
	gsize avail_out_size;
	GstImxAudioUniaudioDec *imx_audio_uniaudio_dec = GST_IMX_AUDIO_UNIAUDIO_DEC(dec);
	int32 dec_ret;
	uint32 offset = 0;
	uint8 *in_buf = NULL;
	uint32 in_size = 0;
	gboolean dec_loop = TRUE, flow_error = FALSE;

	/* With some formats such as Vorbis, the first few buffers are actually redundant,
	 * since they contain codec data that was already specified in codec_data or
	 * streamheader caps earlier. If this is the case, skip these buffers. */
	if (imx_audio_uniaudio_dec->skip_header_counter < imx_audio_uniaudio_dec->num_vorbis_headers)
	{
		GST_TRACE_OBJECT(dec, "skipping header buffer #%u", imx_audio_uniaudio_dec->skip_header_counter);
		++imx_audio_uniaudio_dec->skip_header_counter;
		return gst_audio_decoder_finish_frame(dec, NULL, 1);
	}

	if (buffer != NULL)
	{
		gst_buffer_map(buffer, &in_map, GST_MAP_READ);
		in_buf = in_map.data;
		in_size = in_map.size;
	}

	while (dec_loop)
	{
		GstBuffer *tmp_buf;
		uint8 *out_buf = NULL;
		uint32 out_size = 0;

		if (buffer != NULL)
			GST_TRACE_OBJECT(dec, "feeding %" G_GUINT32_FORMAT " bytes to the decoder", (guint32)in_size);
		else
			GST_TRACE_OBJECT(dec, "draining decoder");

		dec_ret = imx_audio_uniaudio_dec->codec->decode_frame(
			imx_audio_uniaudio_dec->handle,
			in_buf, in_size,
			&offset,
			&out_buf, &out_size
		);

		GST_TRACE_OBJECT(dec, "decode_frame:  return 0x%x  offset %" G_GUINT32_FORMAT "  out_size %" G_GUINT32_FORMAT, (unsigned int)dec_ret, (guint32)offset, (guint32)out_size);

		if ((out_buf != NULL) && (out_size > 0))
		{
			tmp_buf = gst_audio_decoder_allocate_output_buffer(dec, out_size);
			tmp_buf = gst_buffer_make_writable(tmp_buf);
			gst_buffer_fill(tmp_buf, 0, out_buf, out_size);
			gst_adapter_push(imx_audio_uniaudio_dec->out_adapter, tmp_buf);
		}

		if (out_buf != NULL)
		{
			gst_imx_audio_uniaudio_dec_free(out_buf);
		}

		if ((buffer != NULL) && (offset == in_map.size))
		{
			dec_loop = FALSE;
		}

		switch (dec_ret)
		{
			case ACODEC_SUCCESS:
				break;
			case ACODEC_END_OF_STREAM:
				dec_loop = FALSE;
				break;
			case ACODEC_NOT_ENOUGH_DATA:
				break;
			case ACODEC_CAPIBILITY_CHANGE:
				break;
			default:
			{
				dec_loop = FALSE;
				flow_error = TRUE;
				GST_ELEMENT_ERROR(dec, STREAM, DECODE, ("could not decode"), ("error message: %s", imx_audio_uniaudio_dec->codec->get_last_error(imx_audio_uniaudio_dec->handle)));
			}
		}
	}

	if (buffer != NULL)
		gst_buffer_unmap(buffer, &in_map);

	if (flow_error)
		return GST_FLOW_ERROR;

	if (!(imx_audio_uniaudio_dec->has_audioinfo_set))
	{
		UniACodecParameter parameter;
		GstAudioFormat pcm_fmt;
		GstAudioInfo audio_info;

		imx_audio_uniaudio_dec->codec->get_parameter(imx_audio_uniaudio_dec->handle, UNIA_OUTPUT_PCM_FORMAT, &parameter);

		if ((parameter.outputFormat.width == 0) || (parameter.outputFormat.depth == 0))
		{
			GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "no output format available yet");
			return gst_audio_decoder_finish_frame(dec, NULL, 1);
		}

		GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "output sample width: %" G_GUINT32_FORMAT "  depth: %" G_GUINT32_FORMAT, (guint32)(parameter.outputFormat.width), (guint32)(parameter.outputFormat.depth));
		pcm_fmt = gst_audio_format_build_integer(TRUE, G_BYTE_ORDER, parameter.outputFormat.width, parameter.outputFormat.depth);

		GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "setting output format to: %s  %d Hz  %d channels", gst_audio_format_to_string(pcm_fmt), (gint)(parameter.outputFormat.samplerate), (gint)(parameter.outputFormat.channels));

		gst_imx_audio_uniaudio_dec_clear_channel_positions(imx_audio_uniaudio_dec);
		gst_imx_audio_uniaudio_dec_fill_channel_positions(imx_audio_uniaudio_dec, parameter.outputFormat.layout, parameter.outputFormat.channels);

		imx_audio_uniaudio_dec->pcm_format = pcm_fmt;
		imx_audio_uniaudio_dec->num_channels = parameter.outputFormat.channels;

		gst_audio_info_set_format(
			&audio_info,
			pcm_fmt,
			parameter.outputFormat.samplerate,
			parameter.outputFormat.channels,
			imx_audio_uniaudio_dec->reordered_channel_positions
		);
		gst_audio_decoder_set_output_format(dec, &audio_info);

		imx_audio_uniaudio_dec->has_audioinfo_set = TRUE;
	}


	avail_out_size = gst_adapter_available(imx_audio_uniaudio_dec->out_adapter);

	if (avail_out_size > 0)
	{
		out_buffer = gst_adapter_take_buffer(imx_audio_uniaudio_dec->out_adapter, avail_out_size);
		if (imx_audio_uniaudio_dec->original_channel_positions != imx_audio_uniaudio_dec->reordered_channel_positions)
		{
			gst_audio_buffer_reorder_channels(
				out_buffer,
				imx_audio_uniaudio_dec->pcm_format,
				imx_audio_uniaudio_dec->num_channels,
				imx_audio_uniaudio_dec->original_channel_positions,
				imx_audio_uniaudio_dec->reordered_channel_positions
			);
		}
		return gst_audio_decoder_finish_frame(dec, out_buffer, 1);
	}
	else
	{
		return gst_audio_decoder_finish_frame(dec, NULL, 1);
	}
}