Example 1
static GstFlowReturn
gst_gsmdec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
{
  GstGSMDec *gsmdec;
  gsm_signal *out_data;
  gsm_byte *data;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *outbuf;
  GstMapInfo map, omap;
  gsize outsize;
  guint frames, i, errors = 0;

  /* no fancy draining */
  if (G_UNLIKELY (!buffer))
    return GST_FLOW_OK;

  gsmdec = GST_GSMDEC (dec);

  gst_buffer_map (buffer, &map, GST_MAP_READ);

  frames = gst_gsmdec_get_frame_count (gsmdec, map.size);

  /* always the same amount of output samples (20ms worth per frame) */
  outsize = ENCODED_SAMPLES * frames * sizeof (gsm_signal);
  outbuf = gst_buffer_new_and_alloc (outsize);

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_data = (gsm_signal *) omap.data;
  data = (gsm_byte *) map.data;

  for (i = 0; i < frames; ++i) {
    /* now decode the frame into the output buffer */
    if (gsm_decode (gsmdec->state, data, out_data) < 0) {
      /* invalid frame */
      GST_AUDIO_DECODER_ERROR (gsmdec, 1, STREAM, DECODE, (NULL),
          ("tried to decode an invalid frame"), ret);
      memset (out_data, 0, ENCODED_SAMPLES * sizeof (gsm_signal));
      ++errors;
    }
    out_data += ENCODED_SAMPLES;
    data += gsmdec->needed;
    if (gsmdec->use_wav49)
      gsmdec->needed = (gsmdec->needed == 33 ? 32 : 33);
  }

  gst_buffer_unmap (outbuf, &omap);
  gst_buffer_unmap (buffer, &map);

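  /* if every frame was invalid, drop the decoded data; finish_frame below
   * is then called with a NULL buffer so the input frame is still consumed */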
  if (errors == frames) {
    gst_buffer_unref (outbuf);
    outbuf = NULL;
  }

  gst_audio_decoder_finish_frame (dec, outbuf, 1);

  return ret;
}
Example 2
static GstFlowReturn
gst_gsmdec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
{
  GstGSMDec *gsmdec;
  gsm_byte *data;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *outbuf;
  GstMapInfo map, omap;

  /* no fancy draining */
  if (G_UNLIKELY (!buffer))
    return GST_FLOW_OK;

  gsmdec = GST_GSMDEC (dec);

  /* always the same amount of output samples */
  outbuf = gst_buffer_new_and_alloc (ENCODED_SAMPLES * sizeof (gsm_signal));

  /* now decode the frame into the output buffer */
  gst_buffer_map (buffer, &map, GST_MAP_READ);
  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  data = (gsm_byte *) map.data;
  if (gsm_decode (gsmdec->state, data, (gsm_signal *) omap.data) < 0) {
    /* invalid frame */
    GST_AUDIO_DECODER_ERROR (gsmdec, 1, STREAM, DECODE, (NULL),
        ("tried to decode an invalid frame"), ret);
    gst_buffer_unmap (outbuf, &omap);
    gst_buffer_unref (outbuf);
    outbuf = NULL;
  } else {
    gst_buffer_unmap (outbuf, &omap);
  }

  gst_buffer_unmap (buffer, &map);

  gst_audio_decoder_finish_frame (dec, outbuf, 1);

  return ret;
}
Example 3
static GstFlowReturn
gst_wavpack_dec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buf)
{
  GstWavpackDec *dec;
  GstBuffer *outbuf = NULL;
  GstFlowReturn ret = GST_FLOW_OK;
  WavpackHeader wph;
  int32_t decoded, unpacked_size;
  gboolean format_changed;
  gint width, depth, i, j, max;
  gint32 *dec_data = NULL;
  guint8 *out_data;
  GstMapInfo map, omap;

  dec = GST_WAVPACK_DEC (bdec);

  g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);

  gst_buffer_map (buf, &map, GST_MAP_READ);

  /* check input, we only accept framed input with complete chunks */
  if (map.size < sizeof (WavpackHeader))
    goto input_not_framed;

  if (!gst_wavpack_read_header (&wph, map.data))
    goto invalid_header;

  if (map.size < wph.ckSize + 4 * 1 + 4)
    goto input_not_framed;

  if (!(wph.flags & INITIAL_BLOCK))
    goto input_not_framed;

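  /* make the mapped input data available to the wavpack stream reader */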
  dec->wv_id.buffer = map.data;
  dec->wv_id.length = map.size;
  dec->wv_id.position = 0;

  /* create a new wavpack context if there is none yet; if one already
   * exists (i.e. caps were set on the srcpad), we check below whether
   * the format has changed */
  if (!dec->context) {
    gchar error_msg[80];

    dec->context = WavpackOpenFileInputEx (dec->stream_reader,
        &dec->wv_id, NULL, error_msg, OPEN_STREAMING, 0);

    /* expect this to work */
    if (!dec->context) {
      GST_WARNING_OBJECT (dec, "Couldn't decode buffer: %s", error_msg);
      goto context_failed;
    }
  }

  g_assert (dec->context != NULL);

  format_changed =
      (dec->sample_rate != WavpackGetSampleRate (dec->context)) ||
      (dec->channels != WavpackGetNumChannels (dec->context)) ||
      (dec->depth != WavpackGetBytesPerSample (dec->context) * 8) ||
      (dec->channel_mask != WavpackGetChannelMask (dec->context));

  if (!gst_pad_has_current_caps (GST_AUDIO_DECODER_SRC_PAD (dec)) ||
      format_changed) {
    gint channel_mask;

    dec->sample_rate = WavpackGetSampleRate (dec->context);
    dec->channels = WavpackGetNumChannels (dec->context);
    dec->depth = WavpackGetBytesPerSample (dec->context) * 8;

    channel_mask = WavpackGetChannelMask (dec->context);
    if (channel_mask == 0)
      channel_mask = gst_wavpack_get_default_channel_mask (dec->channels);

    dec->channel_mask = channel_mask;

    gst_wavpack_dec_negotiate (dec);

    /* send GST_TAG_AUDIO_CODEC and GST_TAG_BITRATE tags before something
     * is decoded or after the format has changed */
    gst_wavpack_dec_post_tags (dec);
  }

  /* alloc output buffer */
  dec_data = g_malloc (4 * wph.block_samples * dec->channels);

  /* decode */
  decoded = WavpackUnpackSamples (dec->context, dec_data, wph.block_samples);
  if (decoded != wph.block_samples)
    goto decode_error;

  unpacked_size = (dec->width / 8) * wph.block_samples * dec->channels;
  outbuf = gst_buffer_new_and_alloc (unpacked_size);

  /* legacy; pass along offset, whatever that might entail */
  GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf);

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_data = omap.data;

  width = dec->width;
  depth = dec->depth;
  max = dec->channels * wph.block_samples;
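  /* copy the decoded samples into the output buffer, reordering channels
   * and converting to the negotiated output width */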
  if (width == 8) {
    gint8 *outbuffer = (gint8 *) out_data;
    gint *reorder_map = dec->channel_reorder_map;

    for (i = 0; i < max; i += dec->channels) {
      for (j = 0; j < dec->channels; j++)
        *outbuffer++ = (gint8) (dec_data[i + reorder_map[j]]);
    }
  } else if (width == 16) {
    gint16 *outbuffer = (gint16 *) out_data;
    gint *reorder_map = dec->channel_reorder_map;

    for (i = 0; i < max; i += dec->channels) {
      for (j = 0; j < dec->channels; j++)
        *outbuffer++ = (gint16) (dec_data[i + reorder_map[j]]);
    }
  } else if (dec->width == 32) {
    gint32 *outbuffer = (gint32 *) out_data;
    gint *reorder_map = dec->channel_reorder_map;

    if (width != depth) {
      for (i = 0; i < max; i += dec->channels) {
        for (j = 0; j < dec->channels; j++)
          *outbuffer++ =
              (gint32) (dec_data[i + reorder_map[j]] << (width - depth));
      }
    } else {
      for (i = 0; i < max; i += dec->channels) {
        for (j = 0; j < dec->channels; j++)
          *outbuffer++ = (gint32) (dec_data[i + reorder_map[j]]);
      }
    }
  } else {
    g_assert_not_reached ();
  }

  gst_buffer_unmap (outbuf, &omap);
  gst_buffer_unmap (buf, &map);
  buf = NULL;

  g_free (dec_data);

  ret = gst_audio_decoder_finish_frame (bdec, outbuf, 1);

out:
  if (buf)
    gst_buffer_unmap (buf, &map);

  if (G_UNLIKELY (ret != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "flow: %s", gst_flow_get_name (ret));
  }

  return ret;

/* ERRORS */
input_not_framed:
  {
    GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("Expected framed input"));
    ret = GST_FLOW_ERROR;
    goto out;
  }
invalid_header:
  {
    GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("Invalid wavpack header"));
    ret = GST_FLOW_ERROR;
    goto out;
  }
context_failed:
  {
    GST_AUDIO_DECODER_ERROR (bdec, 1, LIBRARY, INIT, (NULL),
        ("error creating Wavpack context"), ret);
    goto out;
  }
decode_error:
  {
    const gchar *reason = "unknown";

    if (dec->context) {
      reason = WavpackGetErrorMessage (dec->context);
    } else {
      reason = "couldn't create decoder context";
    }
    GST_AUDIO_DECODER_ERROR (bdec, 1, STREAM, DECODE, (NULL),
        ("decoding error: %s", reason), ret);
    g_free (dec_data);
    if (ret == GST_FLOW_OK)
      gst_audio_decoder_finish_frame (bdec, NULL, 1);
    goto out;
  }
}
Example 4
static GstFlowReturn
opus_dec_chain_parse_data (GstOpusDec * dec, GstBuffer * buffer)
{
  GstFlowReturn res = GST_FLOW_OK;
  gsize size;
  guint8 *data;
  GstBuffer *outbuf, *bufd;
  gint16 *out_data;
  int n, err;
  int samples;
  unsigned int packet_size;
  GstBuffer *buf;
  GstMapInfo map, omap;
  GstAudioClippingMeta *cmeta = NULL;

  if (dec->state == NULL) {
    /* If we did not get any headers, default to 2 channels */
    if (dec->n_channels == 0) {
      GST_INFO_OBJECT (dec, "No header, assuming single stream");
      dec->n_channels = 2;
      dec->sample_rate = 48000;
      /* default stereo mapping */
      dec->channel_mapping_family = 0;
      dec->channel_mapping[0] = 0;
      dec->channel_mapping[1] = 1;
      dec->n_streams = 1;
      dec->n_stereo_streams = 1;

      if (!gst_opus_dec_negotiate (dec, NULL))
        return GST_FLOW_NOT_NEGOTIATED;
    }

    if (dec->n_channels == 2 && dec->n_streams == 1
        && dec->n_stereo_streams == 0) {
      /* if we are automatically decoding 2 channels, but only have
         a single encoded one, direct both channels to it */
      dec->channel_mapping[1] = 0;
    }

    GST_DEBUG_OBJECT (dec, "Creating decoder with %d channels, %d Hz",
        dec->n_channels, dec->sample_rate);
#ifndef GST_DISABLE_GST_DEBUG
    gst_opus_common_log_channel_mapping_table (GST_ELEMENT (dec), opusdec_debug,
        "Mapping table", dec->n_channels, dec->channel_mapping);
#endif

    GST_DEBUG_OBJECT (dec, "%d streams, %d stereo", dec->n_streams,
        dec->n_stereo_streams);
    dec->state =
        opus_multistream_decoder_create (dec->sample_rate, dec->n_channels,
        dec->n_streams, dec->n_stereo_streams, dec->channel_mapping, &err);
    if (!dec->state || err != OPUS_OK)
      goto creation_failed;
  }

  if (buffer) {
    GST_DEBUG_OBJECT (dec, "Received buffer of size %" G_GSIZE_FORMAT,
        gst_buffer_get_size (buffer));
  } else {
    GST_DEBUG_OBJECT (dec, "Received missing buffer");
  }

  /* if using in-band FEC, we introduce one extra frame of delay, as we may
     need to wait for the next buffer to decode a missing buffer */
  if (dec->use_inband_fec && !dec->primed) {
    GST_DEBUG_OBJECT (dec, "First buffer received in FEC mode, early out");
    gst_buffer_replace (&dec->last_buffer, buffer);
    dec->primed = TRUE;
    goto done;
  }

  /* That's the buffer we'll be sending to the opus decoder. */
  buf = (dec->use_inband_fec
      && gst_buffer_get_size (dec->last_buffer) >
      0) ? dec->last_buffer : buffer;

  /* That's the buffer we get duration from */
  bufd = dec->use_inband_fec ? dec->last_buffer : buffer;

  if (buf && gst_buffer_get_size (buf) > 0) {
    gst_buffer_map (buf, &map, GST_MAP_READ);
    data = map.data;
    size = map.size;
    GST_DEBUG_OBJECT (dec, "Using buffer of size %" G_GSIZE_FORMAT, size);
  } else {
    /* concealment data, pass NULL as the bits parameter */
    GST_DEBUG_OBJECT (dec, "Using NULL buffer");
    data = NULL;
    size = 0;
  }

  if (gst_buffer_get_size (bufd) == 0) {
    GstClockTime const opus_plc_alignment = 2500 * GST_USECOND;
    GstClockTime aligned_missing_duration;
    GstClockTime missing_duration = GST_BUFFER_DURATION (bufd);

    if (!GST_CLOCK_TIME_IS_VALID (missing_duration) || missing_duration == 0) {
      if (GST_CLOCK_TIME_IS_VALID (dec->last_known_buffer_duration)) {
        missing_duration = dec->last_known_buffer_duration;
        GST_WARNING_OBJECT (dec,
            "Missing duration, using last duration %" GST_TIME_FORMAT,
            GST_TIME_ARGS (missing_duration));
      } else {
        GST_WARNING_OBJECT (dec,
            "Missing buffer, but unknown duration, and no previously known duration, assuming 20 ms");
        missing_duration = 20 * GST_MSECOND;
      }
    }

    GST_DEBUG_OBJECT (dec,
        "missing buffer, doing PLC duration %" GST_TIME_FORMAT
        " plus leftover %" GST_TIME_FORMAT, GST_TIME_ARGS (missing_duration),
        GST_TIME_ARGS (dec->leftover_plc_duration));

    /* add the leftover PLC duration to that of the buffer */
    missing_duration += dec->leftover_plc_duration;

    /* align the combined buffer and leftover PLC duration to multiples
     * of 2.5ms, rounding to nearest, and store excess duration for later */
    aligned_missing_duration =
        ((missing_duration +
            opus_plc_alignment / 2) / opus_plc_alignment) * opus_plc_alignment;
    dec->leftover_plc_duration = missing_duration - aligned_missing_duration;

    /* Opus' PLC cannot operate with less than 2.5ms; skip PLC
     * and accumulate the missing duration in the leftover_plc_duration
     * for the next PLC attempt */
    if (aligned_missing_duration < opus_plc_alignment) {
      GST_DEBUG_OBJECT (dec,
          "current duration %" GST_TIME_FORMAT
          " of missing data not enough for PLC (minimum needed: %"
          GST_TIME_FORMAT ") - skipping", GST_TIME_ARGS (missing_duration),
          GST_TIME_ARGS (opus_plc_alignment));
      goto done;
    }

    /* convert the duration (in nanoseconds) to sample count */
    samples =
        gst_util_uint64_scale_int (aligned_missing_duration, dec->sample_rate,
        GST_SECOND);

    GST_DEBUG_OBJECT (dec,
        "calculated PLC frame length: %" GST_TIME_FORMAT
        " num frame samples: %d new leftover: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (aligned_missing_duration), samples,
        GST_TIME_ARGS (dec->leftover_plc_duration));
  } else {
    /* use maximum size (120 ms) as the number of returned samples is
       not constant over the stream. */
    samples = 120 * dec->sample_rate / 1000;
  }
  packet_size = samples * dec->n_channels * 2;

  outbuf =
      gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (dec),
      packet_size);
  if (!outbuf) {
    goto buffer_failed;
  }

  if (size > 0)
    dec->last_known_buffer_duration = packet_duration_opus (data, size);

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_data = (gint16 *) omap.data;

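  /* decode; if libopus reports the output buffer is too small, grow it in
     2.5 ms steps up to the 120 ms maximum and retry */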
  do {
    if (dec->use_inband_fec) {
      if (gst_buffer_get_size (dec->last_buffer) > 0) {
        /* normal delayed decode */
        GST_LOG_OBJECT (dec, "FEC enabled, decoding last delayed buffer");
        n = opus_multistream_decode (dec->state, data, size, out_data, samples,
            0);
      } else {
        /* FEC reconstruction decode */
        GST_LOG_OBJECT (dec, "FEC enabled, reconstructing last buffer");
        n = opus_multistream_decode (dec->state, data, size, out_data, samples,
            1);
      }
    } else {
      /* normal decode */
      GST_LOG_OBJECT (dec, "FEC disabled, decoding buffer");
      n = opus_multistream_decode (dec->state, data, size, out_data, samples,
          0);
    }
    if (n == OPUS_BUFFER_TOO_SMALL) {
      /* if too small, add 2.5 milliseconds and try again, up to the
       * Opus max size of 120 milliseconds */
      if (samples >= 120 * dec->sample_rate / 1000)
        break;
      samples += 25 * dec->sample_rate / 10000;
      packet_size = samples * dec->n_channels * 2;
      gst_buffer_unmap (outbuf, &omap);
      gst_buffer_unref (outbuf);
      outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (dec),
          packet_size);
      if (!outbuf) {
        goto buffer_failed;
      }
      gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
      out_data = (gint16 *) omap.data;
    }
  } while (n == OPUS_BUFFER_TOO_SMALL);
  gst_buffer_unmap (outbuf, &omap);
  if (data != NULL)
    gst_buffer_unmap (buf, &map);

  if (n < 0) {
    GstFlowReturn ret = GST_FLOW_ERROR;

    gst_buffer_unref (outbuf);
    GST_AUDIO_DECODER_ERROR (dec, 1, STREAM, DECODE, (NULL),
        ("Decoding error (%d): %s", n, opus_strerror (n)), ret);
    return ret;
  }
  GST_DEBUG_OBJECT (dec, "decoded %d samples", n);
  gst_buffer_set_size (outbuf, n * 2 * dec->n_channels);
  GST_BUFFER_DURATION (outbuf) = samples * GST_SECOND / dec->sample_rate;
  samples = n;

  cmeta = gst_buffer_get_audio_clipping_meta (buf);

  g_assert (!cmeta || cmeta->format == GST_FORMAT_DEFAULT);

  /* Skip any samples that need skipping */
  if (cmeta && cmeta->start) {
    guint pre_skip = cmeta->start;
    guint scaled_pre_skip = pre_skip * dec->sample_rate / 48000;
    guint skip = scaled_pre_skip > n ? n : scaled_pre_skip;
    guint scaled_skip = skip * 48000 / dec->sample_rate;

    gst_buffer_resize (outbuf, skip * 2 * dec->n_channels, -1);

    GST_INFO_OBJECT (dec,
        "Skipping %u samples at the beginning (%u at 48000 Hz)",
        skip, scaled_skip);
  }

  if (cmeta && cmeta->end) {
    guint post_skip = cmeta->end;
    guint scaled_post_skip = post_skip * dec->sample_rate / 48000;
    guint skip = scaled_post_skip > n ? n : scaled_post_skip;
    guint scaled_skip = skip * 48000 / dec->sample_rate;
    guint outsize = gst_buffer_get_size (outbuf);
    guint skip_bytes = skip * 2 * dec->n_channels;

    if (outsize > skip_bytes)
      outsize -= skip_bytes;
    else
      outsize = 0;

    gst_buffer_resize (outbuf, 0, outsize);

    GST_INFO_OBJECT (dec,
        "Skipping %u samples at the end (%u at 48000 Hz)", skip, scaled_skip);
  }

  if (gst_buffer_get_size (outbuf) == 0) {
    gst_buffer_unref (outbuf);
    outbuf = NULL;
  } else if (dec->opus_pos[0] != GST_AUDIO_CHANNEL_POSITION_INVALID) {
    gst_audio_buffer_reorder_channels (outbuf, GST_AUDIO_FORMAT_S16,
        dec->n_channels, dec->opus_pos, dec->info.position);
  }

  /* Apply gain */
  /* This would be better left to a volume element, as it is a naive
     conversion that does too many int/float conversions. However, we don't
     have control over the pipeline... So make it optional for applications
     that want to insert their own volume element, but apply it by default
     so the stream goes out at the correct volume */
  if (dec->apply_gain && outbuf && dec->r128_gain) {
    gsize rsize;
    unsigned int i, nsamples;
    double volume = dec->r128_gain_volume;
    gint16 *samples;

    gst_buffer_map (outbuf, &omap, GST_MAP_READWRITE);
    samples = (gint16 *) omap.data;
    rsize = omap.size;
    GST_DEBUG_OBJECT (dec, "Applying gain: volume %f", volume);
    nsamples = rsize / 2;
    for (i = 0; i < nsamples; ++i) {
      int sample = (int) (samples[i] * volume + 0.5);
      samples[i] = sample < -32768 ? -32768 : sample > 32767 ? 32767 : sample;
    }
    gst_buffer_unmap (outbuf, &omap);
  }

  if (dec->use_inband_fec) {
    gst_buffer_replace (&dec->last_buffer, buffer);
  }

  res = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (dec), outbuf, 1);

  if (res != GST_FLOW_OK)
    GST_DEBUG_OBJECT (dec, "flow: %s", gst_flow_get_name (res));

done:
  return res;

creation_failed:
  GST_ELEMENT_ERROR (dec, LIBRARY, INIT, ("Failed to create Opus decoder"),
      ("Failed to create Opus decoder (%d): %s", err, opus_strerror (err)));
  return GST_FLOW_ERROR;

buffer_failed:
  GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
      ("Failed to create %u byte buffer", packet_size));
  return GST_FLOW_ERROR;
}
Example 5
static GstFlowReturn
gst_dtsdec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buffer)
{
  GstDtsDec *dts;
  gint channels, i, num_blocks;
  gboolean need_renegotiation = FALSE;
  guint8 *data;
  gsize size;
  GstMapInfo map;
  gint chans;
  gint length = 0, flags, sample_rate, bit_rate, frame_length;
  GstFlowReturn result = GST_FLOW_OK;
  GstBuffer *outbuf;

  dts = GST_DTSDEC (bdec);

  /* no fancy draining */
  if (G_UNLIKELY (!buffer))
    return GST_FLOW_OK;

  /* parsed stuff already, so this should work out fine */
  gst_buffer_map (buffer, &map, GST_MAP_READ);
  data = map.data;
  size = map.size;
  g_assert (size >= 7);

  bit_rate = dts->bit_rate;
  sample_rate = dts->sample_rate;
  flags = 0;
  length = dca_syncinfo (dts->state, data, &flags, &sample_rate, &bit_rate,
      &frame_length);
  g_assert (length == size);

  if (flags != dts->prev_flags) {
    dts->prev_flags = flags;
    dts->flag_update = TRUE;
  }

  /* go over stream properties, renegotiate or update streaminfo if needed */
  if (dts->sample_rate != sample_rate) {
    need_renegotiation = TRUE;
    dts->sample_rate = sample_rate;
  }

  if (flags) {
    dts->stream_channels = flags & (DCA_CHANNEL_MASK | DCA_LFE);
  }

  if (bit_rate != dts->bit_rate) {
    dts->bit_rate = bit_rate;
    gst_dtsdec_update_streaminfo (dts);
  }

  /* If we haven't had an explicit number of channels chosen through properties
   * at this point, choose what to downmix to now, based on what the peer will
   * accept - this allows dtsdec to do downmixing in preference to a
   * downstream element such as audioconvert.
   * FIXME: Add the property back in for forcing output channels.
   */
  if (dts->request_channels != DCA_CHANNEL) {
    flags = dts->request_channels;
  } else if (dts->flag_update) {
    GstCaps *caps;

    dts->flag_update = FALSE;

    caps = gst_pad_get_allowed_caps (GST_AUDIO_DECODER_SRC_PAD (dts));
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      gint channels;
      const int dts_channels[6] = {
        DCA_MONO,
        DCA_STEREO,
        DCA_STEREO | DCA_LFE,
        DCA_2F2R,
        DCA_2F2R | DCA_LFE,
        DCA_3F2R | DCA_LFE,
      };

      /* Prefer the original number of channels, but fixate to something 
       * preferred (first in the caps) downstream if possible.
       */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          flags ? gst_dtsdec_channels (flags, NULL) : 6);
      gst_structure_get_int (structure, "channels", &channels);
      if (channels <= 6)
        flags = dts_channels[channels - 1];
      else
        flags = dts_channels[5];

      gst_caps_unref (copy);
    } else if (flags) {
      flags = dts->stream_channels;
    } else {
      flags = DCA_3F2R | DCA_LFE;
    }

    if (caps)
      gst_caps_unref (caps);
  } else {
    flags = dts->using_channels;
  }

  /* process */
  flags |= DCA_ADJUST_LEVEL;
  dts->level = 1;
  if (dca_frame (dts->state, data, &flags, &dts->level, dts->bias)) {
    gst_buffer_unmap (buffer, &map);
    GST_AUDIO_DECODER_ERROR (dts, 1, STREAM, DECODE, (NULL),
        ("dts_frame error"), result);
    goto exit;
  }
  gst_buffer_unmap (buffer, &map);

  channels = flags & (DCA_CHANNEL_MASK | DCA_LFE);
  if (dts->using_channels != channels) {
    need_renegotiation = TRUE;
    dts->using_channels = channels;
  }

  /* negotiate if required */
  if (need_renegotiation) {
    GST_DEBUG_OBJECT (dts,
        "dtsdec: sample_rate:%d stream_chans:0x%x using_chans:0x%x",
        dts->sample_rate, dts->stream_channels, dts->using_channels);
    if (!gst_dtsdec_renegotiate (dts))
      goto failed_negotiation;
  }

  if (dts->dynamic_range_compression == FALSE) {
    dca_dynrng (dts->state, NULL, NULL);
  }

  flags &= (DCA_CHANNEL_MASK | DCA_LFE);
  chans = gst_dtsdec_channels (flags, NULL);
  if (!chans)
    goto invalid_flags;

  /* handle decoded data, one block is 256 samples */
  num_blocks = dca_blocks_num (dts->state);
  outbuf =
      gst_buffer_new_and_alloc (256 * chans * (SAMPLE_WIDTH / 8) * num_blocks);

  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
  data = map.data;
  size = map.size;
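  /* decode the blocks one by one and interleave the samples into the output
   * buffer in the order given by the channel reorder map */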
  {
    guint8 *ptr = data;
    for (i = 0; i < num_blocks; i++) {
      if (dca_block (dts->state)) {
        /* also marks discont */
        GST_AUDIO_DECODER_ERROR (dts, 1, STREAM, DECODE, (NULL),
            ("error decoding block %d", i), result);
        if (result != GST_FLOW_OK)
          goto exit;
      } else {
        gint n, c;
        gint *reorder_map = dts->channel_reorder_map;

        for (n = 0; n < 256; n++) {
          for (c = 0; c < chans; c++) {
            ((sample_t *) ptr)[n * chans + reorder_map[c]] =
                dts->samples[c * 256 + n];
          }
        }
      }
      ptr += 256 * chans * (SAMPLE_WIDTH / 8);
    }
  }
  gst_buffer_unmap (outbuf, &map);

  result = gst_audio_decoder_finish_frame (bdec, outbuf, 1);

exit:
  return result;

  /* ERRORS */
failed_negotiation:
  {
    GST_ELEMENT_ERROR (dts, CORE, NEGOTIATION, (NULL), (NULL));
    return GST_FLOW_ERROR;
  }
invalid_flags:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dts), STREAM, DECODE, (NULL),
        ("Invalid channel flags: %d", flags));
    return GST_FLOW_ERROR;
  }
}
Example 6
static GstFlowReturn
gst_siren_dec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buf)
{
  GstSirenDec *dec;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *out_buf;
  guint8 *in_data, *out_data;
  guint i, size, num_frames;
  gint out_size, in_size;
  gint decode_ret;
  GstMapInfo inmap, outmap;

  dec = GST_SIREN_DEC (bdec);

  size = gst_buffer_get_size (buf);

  GST_LOG_OBJECT (dec, "Received buffer of size %u", size);

  g_return_val_if_fail (size % 40 == 0, GST_FLOW_ERROR);
  g_return_val_if_fail (size > 0, GST_FLOW_ERROR);

  /* process 40 input bytes into 640 output bytes */
  num_frames = size / 40;

  /* this is the input/output size */
  in_size = num_frames * 40;
  out_size = num_frames * 640;

  GST_LOG_OBJECT (dec, "we have %u frames, %u in, %u out", num_frames, in_size,
      out_size);

  out_buf = gst_audio_decoder_allocate_output_buffer (bdec, out_size);
  if (out_buf == NULL)
    goto alloc_failed;

  /* get the input data for all the frames */
  gst_buffer_map (buf, &inmap, GST_MAP_READ);
  gst_buffer_map (out_buf, &outmap, GST_MAP_WRITE);

  in_data = inmap.data;
  out_data = outmap.data;

  for (i = 0; i < num_frames; i++) {
    GST_LOG_OBJECT (dec, "Decoding frame %u/%u", i, num_frames);

    /* decode 40 input bytes to 640 output bytes */
    decode_ret = Siren7_DecodeFrame (dec->decoder, in_data, out_data);
    if (decode_ret != 0)
      goto decode_error;

    /* move to next frame */
    out_data += 640;
    in_data += 40;
  }

  gst_buffer_unmap (buf, &inmap);
  gst_buffer_unmap (out_buf, &outmap);

  GST_LOG_OBJECT (dec, "Finished decoding");

  /* might really be multiple frames,
   * but was treated as one for all purposes here */
  ret = gst_audio_decoder_finish_frame (bdec, out_buf, 1);

done:
  return ret;

  /* ERRORS */
alloc_failed:
  {
    GST_DEBUG_OBJECT (dec, "failed to pad_alloc buffer: %d (%s)", ret,
        gst_flow_get_name (ret));
    goto done;
  }
decode_error:
  {
    GST_AUDIO_DECODER_ERROR (bdec, 1, STREAM, DECODE, (NULL),
        ("Error decoding frame: %d", decode_ret), ret);
    if (ret == GST_FLOW_OK)
      gst_audio_decoder_finish_frame (bdec, NULL, 1);
    gst_buffer_unref (out_buf);
    goto done;
  }
}
Example 7
static GstFlowReturn
gst_a52dec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buffer)
{
  GstA52Dec *a52dec;
  gint channels, i;
  gboolean need_reneg = FALSE;
  gint chans;
  gint length = 0, flags, sample_rate, bit_rate;
  GstMapInfo map;
  GstFlowReturn result = GST_FLOW_OK;
  GstBuffer *outbuf;
  const gint num_blocks = 6;

  a52dec = GST_A52DEC (bdec);

  /* no fancy draining */
  if (G_UNLIKELY (!buffer))
    return GST_FLOW_OK;

  /* parsed stuff already, so this should work out fine */
  gst_buffer_map (buffer, &map, GST_MAP_READ);
  g_assert (map.size >= 7);

  /* re-obtain some sync header info,
   * should be same as during _parse and could also be cached there,
   * but anyway ... */
  bit_rate = a52dec->bit_rate;
  sample_rate = a52dec->sample_rate;
  flags = 0;
  length = a52_syncinfo (map.data, &flags, &sample_rate, &bit_rate);
  g_assert (length == map.size);

  /* update stream information, renegotiate or re-streaminfo if needed */
  need_reneg = FALSE;
  if (a52dec->sample_rate != sample_rate) {
    GST_DEBUG_OBJECT (a52dec, "sample rate changed");
    need_reneg = TRUE;
    a52dec->sample_rate = sample_rate;
  }

  if (flags) {
    if (a52dec->stream_channels != (flags & (A52_CHANNEL_MASK | A52_LFE))) {
      GST_DEBUG_OBJECT (a52dec, "stream channel flags changed, marking update");
      a52dec->flag_update = TRUE;
    }
    a52dec->stream_channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  }

  if (bit_rate != a52dec->bit_rate) {
    a52dec->bit_rate = bit_rate;
    gst_a52dec_update_streaminfo (a52dec);
  }

  /* If we haven't had an explicit number of channels chosen through properties
   * at this point, choose what to downmix to now, based on what the peer will
   * accept - this allows a52dec to do downmixing in preference to a
   * downstream element such as audioconvert.
   */
  if (a52dec->request_channels != A52_CHANNEL) {
    flags = a52dec->request_channels;
  } else if (a52dec->flag_update) {
    GstCaps *caps;

    a52dec->flag_update = FALSE;

    caps = gst_pad_get_allowed_caps (GST_AUDIO_DECODER_SRC_PAD (a52dec));
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      gint orig_channels = flags ? gst_a52dec_channels (flags, NULL) : 6;
      gint fixed_channels = 0;
      const int a52_channels[6] = {
        A52_MONO,
        A52_STEREO,
        A52_STEREO | A52_LFE,
        A52_2F2R,
        A52_2F2R | A52_LFE,
        A52_3F2R | A52_LFE,
      };

      /* Prefer the original number of channels, but fixate to something
       * preferred (first in the caps) downstream if possible.
       */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          orig_channels);

      if (gst_structure_get_int (structure, "channels", &fixed_channels)
          && fixed_channels <= 6) {
        if (fixed_channels < orig_channels)
          flags = a52_channels[fixed_channels - 1];
      } else {
        flags = a52_channels[5];
      }

      gst_caps_unref (copy);
    } else if (flags)
      flags = a52dec->stream_channels;
    else
      flags = A52_3F2R | A52_LFE;

    if (caps)
      gst_caps_unref (caps);
  } else {
    flags = a52dec->using_channels;
  }

  /* process */
  flags |= A52_ADJUST_LEVEL;
  a52dec->level = 1;
  if (a52_frame (a52dec->state, map.data, &flags, &a52dec->level, a52dec->bias)) {
    gst_buffer_unmap (buffer, &map);
    GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
        ("a52_frame error"), result);
    goto exit;
  }
  gst_buffer_unmap (buffer, &map);

  channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  if (a52dec->using_channels != channels) {
    need_reneg = TRUE;
    a52dec->using_channels = channels;
  }

  /* negotiate if required */
  if (need_reneg) {
    GST_DEBUG_OBJECT (a52dec,
        "a52dec reneg: sample_rate:%d stream_chans:%d using_chans:%d",
        a52dec->sample_rate, a52dec->stream_channels, a52dec->using_channels);
    if (!gst_a52dec_reneg (a52dec))
      goto failed_negotiation;
  }

  if (a52dec->dynamic_range_compression == FALSE) {
    a52_dynrng (a52dec->state, NULL, NULL);
  }

  flags &= (A52_CHANNEL_MASK | A52_LFE);
  chans = gst_a52dec_channels (flags, NULL);
  if (!chans)
    goto invalid_flags;

  /* handle decoded data;
   * each frame has 6 blocks, each block 256 samples per channel */
  outbuf =
      gst_buffer_new_and_alloc (256 * chans * (SAMPLE_WIDTH / 8) * num_blocks);

  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
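  /* decode the 6 blocks one by one and interleave the samples into the
   * output buffer in the order given by the channel reorder map */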
  {
    guint8 *ptr = map.data;
    for (i = 0; i < num_blocks; i++) {
      if (a52_block (a52dec->state)) {
        /* also marks discont */
        GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
            ("error decoding block %d", i), result);
        if (result != GST_FLOW_OK) {
          gst_buffer_unmap (outbuf, &map);
          goto exit;
        }
      } else {
        gint n, c;
        gint *reorder_map = a52dec->channel_reorder_map;

        for (n = 0; n < 256; n++) {
          for (c = 0; c < chans; c++) {
            ((sample_t *) ptr)[n * chans + reorder_map[c]] =
                a52dec->samples[c * 256 + n];
          }
        }
      }
      ptr += 256 * chans * (SAMPLE_WIDTH / 8);
    }
  }
  gst_buffer_unmap (outbuf, &map);

  result = gst_audio_decoder_finish_frame (bdec, outbuf, 1);

exit:
  return result;

  /* ERRORS */
failed_negotiation:
  {
    GST_ELEMENT_ERROR (a52dec, CORE, NEGOTIATION, (NULL), (NULL));
    return GST_FLOW_ERROR;
  }
invalid_flags:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (a52dec), STREAM, DECODE, (NULL),
        ("Invalid channel flags: %d", flags));
    return GST_FLOW_ERROR;
  }
}
Example 8
static GstFlowReturn
gst_mpg123_audio_dec_handle_frame (GstAudioDecoder * dec,
    GstBuffer * input_buffer)
{
  GstMpg123AudioDec *mpg123_decoder;
  int decode_error;
  unsigned char *decoded_bytes;
  size_t num_decoded_bytes;
  GstFlowReturn retval;

  mpg123_decoder = GST_MPG123_AUDIO_DEC (dec);

  g_assert (mpg123_decoder->handle != NULL);

  /* The actual decoding */
  {
    /* feed input data (if there is any) */
    if (G_LIKELY (input_buffer != NULL)) {
      GstMapInfo info;

      if (gst_buffer_map (input_buffer, &info, GST_MAP_READ)) {
        mpg123_feed (mpg123_decoder->handle, info.data, info.size);
        gst_buffer_unmap (input_buffer, &info);
      } else {
        GST_AUDIO_DECODER_ERROR (mpg123_decoder, 1, RESOURCE, READ, (NULL),
            ("gst_memory_map() failed"), retval);
        return retval;
      }
    }

    /* Try to decode a frame */
    decoded_bytes = NULL;
    num_decoded_bytes = 0;
    decode_error = mpg123_decode_frame (mpg123_decoder->handle,
        &mpg123_decoder->frame_offset, &decoded_bytes, &num_decoded_bytes);
  }

  retval = GST_FLOW_OK;

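  /* map the mpg123 result onto a GstFlowReturn, pushing decoded bytes where
   * available */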
  switch (decode_error) {
    case MPG123_NEW_FORMAT:
      /* As mentioned in gst_mpg123_audio_dec_set_format(), the next audioinfo
       * is not set immediately; instead, the code waits for mpg123 to take
       * note of the new format, and then sets the audioinfo. This fixes glitches
       * with mp3s containing several format headers (for example, first half
       * using 44.1kHz, second half 32 kHz) */

      GST_LOG_OBJECT (dec,
          "mpg123 reported a new format -> setting next srccaps");

      gst_mpg123_audio_dec_push_decoded_bytes (mpg123_decoder, decoded_bytes,
          num_decoded_bytes);

      /* If there is a next audioinfo, use it, then set has_next_audioinfo to
       * FALSE, to make sure gst_audio_decoder_set_output_format() isn't called
       * again until set_format is called by the base class */
      if (mpg123_decoder->has_next_audioinfo) {
        if (!gst_audio_decoder_set_output_format (dec,
                &(mpg123_decoder->next_audioinfo))) {
          GST_WARNING_OBJECT (dec, "Unable to set output format");
          retval = GST_FLOW_NOT_NEGOTIATED;
        }
        mpg123_decoder->has_next_audioinfo = FALSE;
      }

      break;

    case MPG123_NEED_MORE:
    case MPG123_OK:
      retval = gst_mpg123_audio_dec_push_decoded_bytes (mpg123_decoder,
          decoded_bytes, num_decoded_bytes);
      break;

    case MPG123_DONE:
      /* If this happens, then the upstream parser somehow missed the ending
       * of the bitstream */
      GST_LOG_OBJECT (dec, "mpg123 is done decoding");
      gst_mpg123_audio_dec_push_decoded_bytes (mpg123_decoder, decoded_bytes,
          num_decoded_bytes);
      retval = GST_FLOW_EOS;
      break;

    default:
    {
      /* Anything else is considered an error */
      int errcode;
      retval = GST_FLOW_ERROR;  /* use error by default */
      switch (decode_error) {
        case MPG123_ERR:
          errcode = mpg123_errcode (mpg123_decoder->handle);
          break;
        default:
          errcode = decode_error;
      }
      switch (errcode) {
        case MPG123_BAD_OUTFORMAT:{
          GstCaps *input_caps =
              gst_pad_get_current_caps (GST_AUDIO_DECODER_SINK_PAD (dec));
          GST_ELEMENT_ERROR (dec, STREAM, FORMAT, (NULL),
              ("Output sample format could not be used when trying to decode frame. "
                  "This is typically caused when the input caps (often the sample "
                  "rate) do not match the actual format of the audio data. "
                  "Input caps: %" GST_PTR_FORMAT, input_caps)
              );
          gst_caps_unref (input_caps);
          break;
        }
        default:{
          char const *errmsg = mpg123_plain_strerror (errcode);
          /* GST_AUDIO_DECODER_ERROR sets a new return value according to
           * its estimations */
          GST_AUDIO_DECODER_ERROR (mpg123_decoder, 1, STREAM, DECODE, (NULL),
              ("mpg123 decoding error: %s", errmsg), retval);
        }
      }
    }
  }

  return retval;
}