Code Example #1
static GstFlowReturn
gst_amrnbenc_handle_frame (GstAudioEncoder * enc, GstBuffer * buffer)
{
  GstAmrnbEnc *amrnbenc;
  GstFlowReturn ret;
  GstBuffer *out;
  GstMapInfo in_map, out_map;
  gsize out_size;

  amrnbenc = GST_AMRNBENC (enc);

  g_return_val_if_fail (amrnbenc->handle, GST_FLOW_FLUSHING);

  /* we don't deal with squeezing remnants, so simply discard those */
  if (G_UNLIKELY (buffer == NULL)) {
    GST_DEBUG_OBJECT (amrnbenc, "no data");
    return GST_FLOW_OK;
  }

  gst_buffer_map (buffer, &in_map, GST_MAP_READ);

  if (G_UNLIKELY (in_map.size < 320)) {
    gst_buffer_unmap (buffer, &in_map);
    GST_DEBUG_OBJECT (amrnbenc, "discarding trailing data of %" G_GSIZE_FORMAT
        " bytes", in_map.size);
    return gst_audio_encoder_finish_frame (enc, NULL, -1);
  }

  /* get output, max size is 32 */
  out = gst_buffer_new_and_alloc (32);
  /* AMR encoder actually writes into the source data buffers it gets */
  /* should be able to handle that with what we are given */

  gst_buffer_map (out, &out_map, GST_MAP_WRITE);
  /* encode */
  out_size =
      Encoder_Interface_Encode (amrnbenc->handle, amrnbenc->bandmode,
      (short *) in_map.data, out_map.data, 0);
  gst_buffer_unmap (out, &out_map);
  gst_buffer_resize (out, 0, out_size);
  gst_buffer_unmap (buffer, &in_map);

  GST_LOG_OBJECT (amrnbenc, "output data size %" G_GSIZE_FORMAT, out_size);

  if (out_size) {
    ret = gst_audio_encoder_finish_frame (enc, out, 160);
  } else {
    /* should not happen (without dtx or so at least) */
    GST_WARNING_OBJECT (amrnbenc, "no encoded data; discarding input");
    gst_buffer_unref (out);
    ret = gst_audio_encoder_finish_frame (enc, NULL, -1);
  }

  return ret;
}
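
Note: the examples on this page all follow the same basic shape around gst_audio_encoder_finish_frame(): map the input buffer, produce one encoded packet, and report how many input samples that packet accounts for (or pass NULL and -1 to discard or drain input without producing a packet). The sketch below only illustrates that shape; it is not taken from any of the plugins shown here, and MY_MAX_PACKET_SIZE, MY_SAMPLES_PER_FRAME and my_codec_encode() are hypothetical placeholders.

static GstFlowReturn
my_enc_handle_frame (GstAudioEncoder * enc, GstBuffer * inbuf)
{
  GstMapInfo in_map, out_map;
  GstBuffer *outbuf;
  gssize encoded;

  /* NULL input means the base class is draining; nothing is buffered here */
  if (inbuf == NULL)
    return GST_FLOW_OK;

  if (!gst_buffer_map (inbuf, &in_map, GST_MAP_READ))
    return GST_FLOW_ERROR;

  /* worst-case packet size for one frame of input (placeholder constant) */
  outbuf = gst_audio_encoder_allocate_output_buffer (enc, MY_MAX_PACKET_SIZE);
  if (outbuf == NULL) {
    gst_buffer_unmap (inbuf, &in_map);
    return GST_FLOW_ERROR;
  }

  gst_buffer_map (outbuf, &out_map, GST_MAP_WRITE);
  /* hypothetical codec call: returns the number of bytes written */
  encoded = my_codec_encode (in_map.data, in_map.size, out_map.data);
  gst_buffer_unmap (outbuf, &out_map);
  gst_buffer_unmap (inbuf, &in_map);

  if (encoded <= 0) {
    /* no packet produced: still mark the queued input as consumed */
    gst_buffer_unref (outbuf);
    return gst_audio_encoder_finish_frame (enc, NULL, -1);
  }

  gst_buffer_set_size (outbuf, encoded);
  /* the packet accounts for MY_SAMPLES_PER_FRAME input samples */
  return gst_audio_encoder_finish_frame (enc, outbuf, MY_SAMPLES_PER_FRAME);
}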
Code Example #2
static GstFlowReturn
gst_two_lame_flush_full (GstTwoLame * lame, gboolean push)
{
  GstBuffer *buf;
  GstMapInfo map;
  gint size;
  GstFlowReturn result = GST_FLOW_OK;

  if (!lame->glopts)
    return GST_FLOW_OK;

  buf = gst_buffer_new_and_alloc (16384);
  gst_buffer_map (buf, &map, GST_MAP_WRITE);
  size = twolame_encode_flush (lame->glopts, map.data, 16384);
  gst_buffer_unmap (buf, &map);

  if (size > 0 && push) {
    gst_buffer_set_size (buf, size);
    GST_DEBUG_OBJECT (lame, "pushing final packet of %u bytes", size);
    result = gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (lame), buf, -1);
  } else {
    GST_DEBUG_OBJECT (lame, "no final packet (size=%d, push=%d)", size, push);
    gst_buffer_unref (buf);
    result = GST_FLOW_OK;
  }
  return result;
}
Code Example #3
File: audioencoder.c Project: pexip/gst-plugins-base
static GstFlowReturn
gst_audio_encoder_tester_handle_frame (GstAudioEncoder * enc,
    GstBuffer * buffer)
{
  guint8 *data;
  GstMapInfo map;
  guint64 input_num;
  GstBuffer *output_buffer;

  if (buffer == NULL)
    return GST_FLOW_OK;

  gst_buffer_map (buffer, &map, GST_MAP_READ);
  input_num = *((guint64 *) map.data);
  gst_buffer_unmap (buffer, &map);

  data = g_malloc (sizeof (guint64));
  *(guint64 *) data = input_num;

  output_buffer = gst_buffer_new_wrapped (data, sizeof (guint64));
  GST_BUFFER_PTS (output_buffer) = GST_BUFFER_PTS (buffer);
  GST_BUFFER_DURATION (output_buffer) = GST_BUFFER_DURATION (buffer);

  return gst_audio_encoder_finish_frame (enc, output_buffer, TEST_AUDIO_RATE);
}
Code Example #4
static GstFlowReturn
gst_vorbis_enc_output_buffers (GstVorbisEnc * vorbisenc)
{
  GstFlowReturn ret;

  /* vorbis does some data preanalysis, then divides up blocks for
     more involved (potentially parallel) processing.  Get a single
     block for encoding now */
  while (vorbis_analysis_blockout (&vorbisenc->vd, &vorbisenc->vb) == 1) {
    ogg_packet op;

    GST_LOG_OBJECT (vorbisenc, "analysed to a block");

    /* analysis */
    vorbis_analysis (&vorbisenc->vb, NULL);
    vorbis_bitrate_addblock (&vorbisenc->vb);

    while (vorbis_bitrate_flushpacket (&vorbisenc->vd, &op)) {
      GstBuffer *buf;

      if (op.e_o_s) {
        GstAudioEncoder *enc = GST_AUDIO_ENCODER (vorbisenc);
        GstClockTime duration;

        GST_DEBUG_OBJECT (vorbisenc, "Got EOS packet from libvorbis");
        GST_AUDIO_ENCODER_STREAM_LOCK (enc);
        if (!GST_CLOCK_TIME_IS_VALID (enc->output_segment.stop)) {
          GST_DEBUG_OBJECT (vorbisenc,
              "Output segment has no end time, setting");
          duration =
              gst_util_uint64_scale (op.granulepos, GST_SECOND,
              vorbisenc->frequency);
          enc->output_segment.stop = enc->output_segment.start + duration;
          GST_DEBUG_OBJECT (enc, "new output segment %" GST_SEGMENT_FORMAT,
              &enc->output_segment);
          gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (enc),
              gst_event_new_segment (&enc->output_segment));
        }
        GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
      }

      GST_LOG_OBJECT (vorbisenc, "pushing out a data packet");
      buf =
          gst_audio_encoder_allocate_output_buffer (GST_AUDIO_ENCODER
          (vorbisenc), op.bytes);
      gst_buffer_fill (buf, 0, op.packet, op.bytes);
      /* tracking granulepos should tell us samples accounted for */
      ret =
          gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER
          (vorbisenc), buf, op.granulepos - vorbisenc->samples_out);
      vorbisenc->samples_out = op.granulepos;

      if (ret != GST_FLOW_OK)
        return ret;
    }
  }

  return GST_FLOW_OK;
}
Code Example #5
static GstFlowReturn
gst_gsmenc_handle_frame (GstAudioEncoder * benc, GstBuffer * buffer)
{
  GstGSMEnc *gsmenc;
  gsm_signal *data;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *outbuf;
  GstMapInfo map, omap;

  gsmenc = GST_GSMENC (benc);

  /* we don't deal with squeezing remnants, so simply discard those */
  if (G_UNLIKELY (buffer == NULL)) {
    GST_DEBUG_OBJECT (gsmenc, "no data");
    goto done;
  }

  gst_buffer_map (buffer, &map, GST_MAP_READ);
  if (G_UNLIKELY (map.size < 320)) {
    GST_DEBUG_OBJECT (gsmenc, "discarding trailing data %d", (gint) map.size);
    gst_buffer_unmap (buffer, &map);
    ret = gst_audio_encoder_finish_frame (benc, NULL, -1);
    goto done;
  }

  outbuf = gst_buffer_new_and_alloc (33 * sizeof (gsm_byte));
  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);

  /* encode 160 16-bit samples into 33 bytes */
  data = (gsm_signal *) map.data;
  gsm_encode (gsmenc->state, data, (gsm_byte *) omap.data);

  GST_LOG_OBJECT (gsmenc, "encoded to %d bytes", (gint) omap.size);
  gst_buffer_unmap (buffer, &map);
  gst_buffer_unmap (outbuf, &omap);

  ret = gst_audio_encoder_finish_frame (benc, outbuf, 160);

done:
  return ret;
}
Code Example #6
static GstFlowReturn
gst_mulawenc_handle_frame (GstAudioEncoder * audioenc, GstBuffer * buffer)
{
  GstMuLawEnc *mulawenc;
  GstMapInfo inmap, outmap;
  gint16 *linear_data;
  gsize linear_size;
  guint8 *mulaw_data;
  guint mulaw_size;
  GstBuffer *outbuf;
  GstFlowReturn ret;

  if (!buffer) {
    ret = GST_FLOW_OK;
    goto done;
  }

  mulawenc = GST_MULAWENC (audioenc);

  if (!mulawenc->rate || !mulawenc->channels)
    goto not_negotiated;

  gst_buffer_map (buffer, &inmap, GST_MAP_READ);
  linear_data = (gint16 *) inmap.data;
  linear_size = inmap.size;

  mulaw_size = linear_size / 2;

  outbuf = gst_audio_encoder_allocate_output_buffer (audioenc, mulaw_size);

  g_assert (outbuf);

  gst_buffer_map (outbuf, &outmap, GST_MAP_WRITE);
  mulaw_data = outmap.data;

  mulaw_encode (linear_data, mulaw_data, mulaw_size);

  gst_buffer_unmap (outbuf, &outmap);
  gst_buffer_unmap (buffer, &inmap);

  ret = gst_audio_encoder_finish_frame (audioenc, outbuf, -1);

done:

  return ret;

not_negotiated:
  {
    GST_DEBUG_OBJECT (mulawenc, "no format negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
}
Code Example #7
File: gstvorbisenc.c Project: PeterXu/gst-mobile
static GstFlowReturn
gst_vorbis_enc_output_buffers (GstVorbisEnc * vorbisenc)
{
    GstFlowReturn ret;

    /* vorbis does some data preanalysis, then divides up blocks for
       more involved (potentially parallel) processing.  Get a single
       block for encoding now */
    while (vorbis_analysis_blockout (&vorbisenc->vd, &vorbisenc->vb) == 1) {
        ogg_packet op;

        GST_LOG_OBJECT (vorbisenc, "analysed to a block");

        /* analysis */
        vorbis_analysis (&vorbisenc->vb, NULL);
        vorbis_bitrate_addblock (&vorbisenc->vb);

        while (vorbis_bitrate_flushpacket (&vorbisenc->vd, &op)) {
            GstBuffer *buf;

            GST_LOG_OBJECT (vorbisenc, "pushing out a data packet");
            buf =
                gst_audio_encoder_allocate_output_buffer (GST_AUDIO_ENCODER
                        (vorbisenc), op.bytes);
            gst_buffer_fill (buf, 0, op.packet, op.bytes);
            /* tracking granulepos should tell us samples accounted for */
            ret =
                gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER
                                                (vorbisenc), buf, op.granulepos - vorbisenc->samples_out);
            vorbisenc->samples_out = op.granulepos;

            if (ret != GST_FLOW_OK)
                return ret;
        }
    }

    return GST_FLOW_OK;
}
Code Example #8
File: gstceltenc.c Project: cbetz421/gst-plugins-bad
static GstFlowReturn
gst_celt_enc_encode (GstCeltEnc * enc, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint frame_size = enc->frame_size;
  gint bytes = frame_size * 2 * enc->channels;
  gint bytes_per_packet;
  gint16 *data, *data0 = NULL;
  gint outsize, size;
  GstBuffer *outbuf;
  GstMapInfo map, omap;

  if (G_LIKELY (buf)) {
    gst_buffer_map (buf, &map, GST_MAP_READ);
    data = (gint16 *) map.data;
    size = map.size;

    if (G_UNLIKELY (map.size % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");
      size = ((size / bytes) + 1) * bytes;
      data0 = g_malloc0 (size);
      memcpy (data0, data, map.size);
      data = data0;
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  frame_size = size / (2 * enc->channels);
  if (enc->cbr) {
    bytes_per_packet = (enc->bitrate * frame_size / enc->rate + 4) / 8;
  } else {
    bytes_per_packet = (enc->max_bitrate * frame_size / enc->rate + 4) / 8;
  }

  outbuf = gst_buffer_new_and_alloc (bytes_per_packet);
  GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)", frame_size, bytes);

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);

#ifdef HAVE_CELT_0_8
  outsize =
      celt_encode (enc->state, data, frame_size, omap.data, bytes_per_packet);
#else
  outsize = celt_encode (enc->state, data, NULL, omap.data, bytes_per_packet);
#endif

  gst_buffer_unmap (outbuf, &omap);
  gst_buffer_unmap (buf, &map);

  if (outsize < 0) {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("encoding failed: %d", outsize));
    ret = GST_FLOW_ERROR;
    goto done;
  }

  GST_DEBUG_OBJECT (enc, "encoding %d bytes", bytes);

  ret = gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc),
      outbuf, frame_size);

done:
  g_free (data0);
  return ret;
}
Code Example #9
File: gstavaudenc.c Project: GStreamer/gst-libav
static GstFlowReturn
gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
                               GstBuffer * buffer, gint * have_data)
{
    GstAudioEncoder *enc;
    AVCodecContext *ctx;
    gint res;
    GstFlowReturn ret;
    GstAudioInfo *info;
    AVPacket *pkt;
    AVFrame *frame = ffmpegaudenc->frame;
    gboolean planar;
    gint nsamples = -1;

    enc = GST_AUDIO_ENCODER (ffmpegaudenc);

    ctx = ffmpegaudenc->context;

    pkt = g_slice_new0 (AVPacket);

    if (buffer != NULL) {
        BufferInfo *buffer_info = g_slice_new0 (BufferInfo);
        guint8 *audio_in;
        guint in_size;

        buffer_info->buffer = buffer;
        gst_buffer_map (buffer, &buffer_info->map, GST_MAP_READ);
        audio_in = buffer_info->map.data;
        in_size = buffer_info->map.size;

        GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer %p size:%u", audio_in,
                        in_size);

        info = gst_audio_encoder_get_audio_info (enc);
        planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt);
        frame->format = ffmpegaudenc->context->sample_fmt;
        frame->sample_rate = ffmpegaudenc->context->sample_rate;
        frame->channels = ffmpegaudenc->context->channels;
        frame->channel_layout = ffmpegaudenc->context->channel_layout;

        if (planar && info->channels > 1) {
            gint channels;
            gint i, j;

            nsamples = frame->nb_samples = in_size / info->bpf;
            channels = info->channels;

            frame->buf[0] =
                av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0);

            if (info->channels > AV_NUM_DATA_POINTERS) {
                buffer_info->ext_data_array = frame->extended_data =
                                                  av_malloc_array (info->channels, sizeof (uint8_t *));
            } else {
                frame->extended_data = frame->data;
            }

            buffer_info->ext_data = frame->extended_data[0] = av_malloc (in_size);
            frame->linesize[0] = in_size / channels;
            for (i = 1; i < channels; i++)
                frame->extended_data[i] =
                    frame->extended_data[i - 1] + frame->linesize[0];

            switch (info->finfo->width) {
            case 8: {
                const guint8 *idata = (const guint8 *) audio_in;

                for (i = 0; i < nsamples; i++) {
                    for (j = 0; j < channels; j++) {
                        ((guint8 *) frame->extended_data[j])[i] = idata[j];
                    }
                    idata += channels;
                }
                break;
            }
            case 16: {
                const guint16 *idata = (const guint16 *) audio_in;

                for (i = 0; i < nsamples; i++) {
                    for (j = 0; j < channels; j++) {
                        ((guint16 *) frame->extended_data[j])[i] = idata[j];
                    }
                    idata += channels;
                }
                break;
            }
            case 32: {
                const guint32 *idata = (const guint32 *) audio_in;

                for (i = 0; i < nsamples; i++) {
                    for (j = 0; j < channels; j++) {
                        ((guint32 *) frame->extended_data[j])[i] = idata[j];
                    }
                    idata += channels;
                }

                break;
            }
            case 64: {
                const guint64 *idata = (const guint64 *) audio_in;

                for (i = 0; i < nsamples; i++) {
                    for (j = 0; j < channels; j++) {
                        ((guint64 *) frame->extended_data[j])[i] = idata[j];
                    }
                    idata += channels;
                }

                break;
            }
            default:
                g_assert_not_reached ();
                break;
            }

            gst_buffer_unmap (buffer, &buffer_info->map);
            gst_buffer_unref (buffer);
            buffer_info->buffer = NULL;
        } else {
            frame->data[0] = audio_in;
            frame->extended_data = frame->data;
            frame->linesize[0] = in_size;
            frame->nb_samples = nsamples = in_size / info->bpf;
            frame->buf[0] =
                av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0);
        }

        /* we have a frame to feed the encoder */
        res = avcodec_encode_audio2 (ctx, pkt, frame, have_data);

        av_frame_unref (frame);
    } else {
        GST_LOG_OBJECT (ffmpegaudenc, "draining");
        /* flushing the encoder */
        res = avcodec_encode_audio2 (ctx, pkt, NULL, have_data);
    }

    if (res < 0) {
        char error_str[128] = { 0, };

        g_slice_free (AVPacket, pkt);
        av_strerror (res, error_str, sizeof (error_str));
        GST_ERROR_OBJECT (enc, "Failed to encode buffer: %d - %s", res, error_str);
        return GST_FLOW_OK;
    }
    GST_LOG_OBJECT (ffmpegaudenc, "got output size %d", res);

    if (*have_data) {
        GstBuffer *outbuf;
        const AVCodec *codec;

        GST_LOG_OBJECT (ffmpegaudenc, "pushing size %d", pkt->size);

        outbuf =
            gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, pkt->data,
                                         pkt->size, 0, pkt->size, pkt, gst_ffmpegaudenc_free_avpacket);

        codec = ffmpegaudenc->context->codec;
        if ((codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) || !buffer) {
            /* FIXME: Not really correct, as -1 means "all the samples we got
               given so far", which may not be true depending on the codec,
               but we have no way to know AFAICT */
            ret = gst_audio_encoder_finish_frame (enc, outbuf, -1);
        } else {
            ret = gst_audio_encoder_finish_frame (enc, outbuf, nsamples);
        }
    } else {
        GST_LOG_OBJECT (ffmpegaudenc, "no output produced");
        g_slice_free (AVPacket, pkt);
        ret = GST_FLOW_OK;
    }

    return ret;
}
Code Example #10
File: gstopusenc.c Project: kanongil/gst-plugins-bad
static GstFlowReturn
gst_opus_enc_encode (GstOpusEnc * enc, GstBuffer * buf)
{
  guint8 *bdata, *data, *mdata = NULL;
  gsize bsize, size;
  gsize bytes;
  gint ret = GST_FLOW_OK;
  gint outsize;
  GstBuffer *outbuf;

  g_mutex_lock (enc->property_lock);

  bytes = enc->frame_samples * enc->n_channels * 2;
  if (G_LIKELY (buf)) {
    bdata = GST_BUFFER_DATA (buf);
    bsize = GST_BUFFER_SIZE (buf);
    if (G_UNLIKELY (bsize % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");

      size = ((bsize / bytes) + 1) * bytes;
      mdata = g_malloc0 (size);
      memcpy (mdata, bdata, bsize);
      bdata = NULL;
      data = mdata;
    } else {
      data = bdata;
      size = bsize;
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  g_assert (size == bytes);

  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc),
      GST_BUFFER_OFFSET_NONE, enc->max_payload_size * enc->n_channels,
      GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (enc)), &outbuf);

  if (GST_FLOW_OK != ret)
    goto done;

  GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)",
      enc->frame_samples, (int) bytes);

  outsize =
      opus_multistream_encode (enc->state, (const gint16 *) data,
      enc->frame_samples, GST_BUFFER_DATA (outbuf),
      enc->max_payload_size * enc->n_channels);

  if (outsize < 0) {
    GST_ERROR_OBJECT (enc, "Encoding failed: %d", outsize);
    ret = GST_FLOW_ERROR;
    goto done;
  } else if (outsize > enc->max_payload_size) {
    GST_WARNING_OBJECT (enc,
        "Encoded size %d is higher than max payload size (%d bytes)",
        outsize, enc->max_payload_size);
    ret = GST_FLOW_ERROR;
    goto done;
  }

  GST_DEBUG_OBJECT (enc, "Output packet is %u bytes", outsize);
  GST_BUFFER_SIZE (outbuf) = outsize;

  ret =
      gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc), outbuf,
      enc->frame_samples);

done:

  g_mutex_unlock (enc->property_lock);

  if (mdata)
    g_free (mdata);

  return ret;
}
Code Example #11
File: gstopusenc.c Project: drothlis/gst-plugins-bad
static GstFlowReturn
gst_opus_enc_encode (GstOpusEnc * enc, GstBuffer * buf)
{
  guint8 *bdata = NULL, *data, *mdata = NULL;
  gsize bsize, size;
  gsize bytes = enc->frame_samples * enc->n_channels * 2;
  gint ret = GST_FLOW_OK;
  GstMapInfo map;
  GstMapInfo omap;
  gint outsize;
  GstBuffer *outbuf;

  g_mutex_lock (enc->property_lock);

  if (G_LIKELY (buf)) {
    gst_buffer_map (buf, &map, GST_MAP_READ);
    bdata = map.data;
    bsize = map.size;

    if (G_UNLIKELY (bsize % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");

      size = ((bsize / bytes) + 1) * bytes;
      mdata = g_malloc0 (size);
      memcpy (mdata, bdata, bsize);
      data = mdata;
    } else {
      data = bdata;
      size = bsize;
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  g_assert (size == bytes);

  outbuf = gst_buffer_new_and_alloc (enc->max_payload_size * enc->n_channels);
  if (!outbuf)
    goto done;

  GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)",
      enc->frame_samples, (int) bytes);

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);

  GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)",
      enc->frame_samples, (int) bytes);

  outsize =
      opus_multistream_encode (enc->state, (const gint16 *) data,
      enc->frame_samples, omap.data, enc->max_payload_size * enc->n_channels);

  gst_buffer_unmap (outbuf, &omap);

  if (outsize < 0) {
    GST_ERROR_OBJECT (enc, "Encoding failed: %d", outsize);
    ret = GST_FLOW_ERROR;
    goto done;
  } else if (outsize > enc->max_payload_size) {
    GST_WARNING_OBJECT (enc,
        "Encoded size %d is higher than max payload size (%d bytes)",
        outsize, enc->max_payload_size);
    ret = GST_FLOW_ERROR;
    goto done;
  }

  GST_DEBUG_OBJECT (enc, "Output packet is %u bytes", outsize);
  gst_buffer_set_size (outbuf, outsize);

  ret =
      gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc), outbuf,
      enc->frame_samples);

done:

  if (bdata)
    gst_buffer_unmap (buf, &map);
  g_mutex_unlock (enc->property_lock);

  if (mdata)
    g_free (mdata);

  return ret;
}
Code Example #12
File: gstceltenc.c Project: dylansong77/gstreamer
static GstFlowReturn
gst_celt_enc_encode (GstCeltEnc * enc, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint frame_size = enc->frame_size;
  gint bytes = frame_size * 2 * enc->channels;
  gint bytes_per_packet;
  gint16 *data, *data0 = NULL;
  gint outsize, size;
  GstBuffer *outbuf;

  if (G_LIKELY (buf)) {
    data = (gint16 *) GST_BUFFER_DATA (buf);
    size = GST_BUFFER_SIZE (buf);

    if (G_UNLIKELY (size % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");
      size = ((size / bytes) + 1) * bytes;
      data0 = data = g_malloc0 (size);
      memcpy (data, GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf));
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  frame_size = size / (2 * enc->channels);
  if (enc->cbr) {
    bytes_per_packet = (enc->bitrate * frame_size / enc->rate + 4) / 8;
  } else {
    bytes_per_packet = (enc->max_bitrate * frame_size / enc->rate + 4) / 8;
  }

  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc),
      GST_BUFFER_OFFSET_NONE, bytes_per_packet,
      GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (enc)), &outbuf);

  if (GST_FLOW_OK != ret)
    goto done;

  GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)", frame_size, bytes);

#ifdef HAVE_CELT_0_8
  outsize =
      celt_encode (enc->state, data, frame_size,
      GST_BUFFER_DATA (outbuf), bytes_per_packet);
#else
  outsize =
      celt_encode (enc->state, data, NULL,
      GST_BUFFER_DATA (outbuf), bytes_per_packet);
#endif

  if (outsize < 0) {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("encoding failed: %d", outsize));
    ret = GST_FLOW_ERROR;
    goto done;
  }

  GST_DEBUG_OBJECT (enc, "encoding %d bytes", bytes);

  ret = gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc),
      outbuf, frame_size);

done:
  g_free (data0);
  return ret;
}
Code Example #13
static GstFlowReturn
gst_two_lame_handle_frame (GstAudioEncoder * enc, GstBuffer * buf)
{
  GstTwoLame *twolame;
  gint mp3_buffer_size, mp3_size;
  GstBuffer *mp3_buf;
  GstFlowReturn result;
  gint num_samples;
  GstMapInfo map, mp3_map;

  twolame = GST_TWO_LAME (enc);

  /* squeeze remaining and push */
  if (G_UNLIKELY (buf == NULL))
    return gst_two_lame_flush_full (twolame, TRUE);

  gst_buffer_map (buf, &map, GST_MAP_READ);

  if (twolame->float_input)
    num_samples = map.size / 4;
  else
    num_samples = map.size / 2;

  /* allocate space for output */
  mp3_buffer_size = 1.25 * num_samples + 16384;
  mp3_buf = gst_buffer_new_and_alloc (mp3_buffer_size);
  gst_buffer_map (mp3_buf, &mp3_map, GST_MAP_WRITE);

  if (twolame->num_channels == 1) {
    if (twolame->float_input)
      mp3_size = twolame_encode_buffer_float32 (twolame->glopts,
          (float *) map.data,
          (float *) map.data, num_samples, mp3_map.data, mp3_buffer_size);
    else
      mp3_size = twolame_encode_buffer (twolame->glopts,
          (short int *) map.data,
          (short int *) map.data, num_samples, mp3_map.data, mp3_buffer_size);
  } else {
    if (twolame->float_input)
      mp3_size = twolame_encode_buffer_float32_interleaved (twolame->glopts,
          (float *) map.data,
          num_samples / twolame->num_channels, mp3_map.data, mp3_buffer_size);
    else
      mp3_size = twolame_encode_buffer_interleaved (twolame->glopts,
          (short int *) map.data,
          num_samples / twolame->num_channels, mp3_map.data, mp3_buffer_size);
  }

  GST_LOG_OBJECT (twolame, "encoded %" G_GSIZE_FORMAT " bytes of audio "
      "to %d bytes of mp3", map.size, mp3_size);

  gst_buffer_unmap (buf, &map);
  gst_buffer_unmap (mp3_buf, &mp3_map);

  if (mp3_size > 0) {
    gst_buffer_set_size (mp3_buf, mp3_size);
    result = gst_audio_encoder_finish_frame (enc, mp3_buf, -1);
  } else {
    if (mp3_size < 0) {
      /* eat error ? */
      g_warning ("error %d", mp3_size);
    }
    gst_buffer_unref (mp3_buf);
    result = GST_FLOW_OK;
  }

  return result;
}
Code Example #14
static GstFlowReturn
gst_speex_enc_encode (GstSpeexEnc * enc, GstBuffer * buf)
{
  gint frame_size = enc->frame_size;
  gint bytes = frame_size * 2 * enc->channels, samples, size;
  gint outsize, written, dtx_ret = 0;
  guint8 *data;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;

  if (G_LIKELY (buf)) {
    data = GST_BUFFER_DATA (buf);
    size = GST_BUFFER_SIZE (buf);

    if (G_UNLIKELY (size % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");
      size = ((size / bytes) + 1) * bytes;
      data = g_malloc0 (size);
      memcpy (data, GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf));
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  samples = size / (2 * enc->channels);
  speex_bits_reset (&enc->bits);

  /* FIXME what about dropped samples if DTX enabled ?? */

  while (size) {
    GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)", frame_size, bytes);

    if (enc->channels == 2) {
      speex_encode_stereo_int ((gint16 *) data, frame_size, &enc->bits);
    }
    dtx_ret += speex_encode_int (enc->state, (gint16 *) data, &enc->bits);

    data += bytes;
    size -= bytes;
  }

  speex_bits_insert_terminator (&enc->bits);
  outsize = speex_bits_nbytes (&enc->bits);

  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc),
      GST_BUFFER_OFFSET_NONE, outsize,
      GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (enc)), &outbuf);

  if ((GST_FLOW_OK != ret))
    goto done;

  written = speex_bits_write (&enc->bits,
      (gchar *) GST_BUFFER_DATA (outbuf), outsize);

  if (G_UNLIKELY (written < outsize)) {
    GST_ERROR_OBJECT (enc, "short write: %d < %d bytes", written, outsize);
    GST_BUFFER_SIZE (outbuf) = written;
  } else if (G_UNLIKELY (written > outsize)) {
    GST_ERROR_OBJECT (enc, "overrun: %d > %d bytes", written, outsize);
  }

  if (!dtx_ret)
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);

  ret = gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc),
      outbuf, samples);

done:
  return ret;
}
Code Example #15
static GstFlowReturn
gst_fdkaacenc_handle_frame (GstAudioEncoder * enc, GstBuffer * inbuf)
{
  GstFdkAacEnc *self = GST_FDKAACENC (enc);
  GstFlowReturn ret = GST_FLOW_OK;
  GstAudioInfo *info;
  GstMapInfo imap, omap;
  GstBuffer *outbuf;
  AACENC_BufDesc in_desc = { 0 };
  AACENC_BufDesc out_desc = { 0 };
  AACENC_InArgs in_args = { 0 };
  AACENC_OutArgs out_args = { 0 };
  gint in_id = IN_AUDIO_DATA, out_id = OUT_BITSTREAM_DATA;
  gint in_sizes, out_sizes;
  gint in_el_sizes, out_el_sizes;
  AACENC_ERROR err;

  info = gst_audio_encoder_get_audio_info (enc);

  if (inbuf) {
    if (self->need_reorder) {
      inbuf = gst_buffer_copy (inbuf);
      gst_buffer_map (inbuf, &imap, GST_MAP_READWRITE);
      gst_audio_reorder_channels (imap.data, imap.size,
          GST_AUDIO_INFO_FORMAT (info), GST_AUDIO_INFO_CHANNELS (info),
          &GST_AUDIO_INFO_POSITION (info, 0), self->aac_positions);
    } else {
      gst_buffer_map (inbuf, &imap, GST_MAP_READ);
    }

    in_args.numInSamples = imap.size / GST_AUDIO_INFO_BPS (info);

    in_sizes = imap.size;
    in_el_sizes = GST_AUDIO_INFO_BPS (info);
    in_desc.numBufs = 1;
  } else {
    in_args.numInSamples = -1;

    in_sizes = 0;
    in_el_sizes = 0;
    in_desc.numBufs = 0;
  }

  in_desc.bufferIdentifiers = &in_id;
  in_desc.bufs = (void *) &imap.data;
  in_desc.bufSizes = &in_sizes;
  in_desc.bufElSizes = &in_el_sizes;

  outbuf = gst_audio_encoder_allocate_output_buffer (enc, self->outbuf_size);
  if (!outbuf) {
    ret = GST_FLOW_ERROR;
    goto out;
  }

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_sizes = omap.size;
  out_el_sizes = 1;
  out_desc.bufferIdentifiers = &out_id;
  out_desc.numBufs = 1;
  out_desc.bufs = (void *) &omap.data;
  out_desc.bufSizes = &out_sizes;
  out_desc.bufElSizes = &out_el_sizes;

  err = aacEncEncode (self->enc, &in_desc, &out_desc, &in_args, &out_args);
  if (err == AACENC_ENCODE_EOF && !inbuf)
    goto out;
  else if (err != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Failed to encode data: %d", err);
    ret = GST_FLOW_ERROR;
    goto out;
  }

  if (inbuf) {
    gst_buffer_unmap (inbuf, &imap);
    if (self->need_reorder)
      gst_buffer_unref (inbuf);
    inbuf = NULL;
  }

  if (!out_args.numOutBytes)
    goto out;

  gst_buffer_unmap (outbuf, &omap);
  gst_buffer_set_size (outbuf, out_args.numOutBytes);

  ret = gst_audio_encoder_finish_frame (enc, outbuf, self->samples_per_frame);
  outbuf = NULL;

out:
  if (outbuf) {
    gst_buffer_unmap (outbuf, &omap);
    gst_buffer_unref (outbuf);
  }
  if (inbuf) {
    gst_buffer_unmap (inbuf, &imap);
    if (self->need_reorder)
      gst_buffer_unref (inbuf);
  }

  return ret;
}
Code Example #16
static GstFlowReturn
gst_siren_enc_handle_frame (GstAudioEncoder * benc, GstBuffer * buf)
{
  GstSirenEnc *enc;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *out_buf;
  guint8 *in_data, *out_data;
  guint i, size, num_frames;
  gint out_size, in_size;
  gint encode_ret;

  g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);

  enc = GST_SIREN_ENC (benc);

  size = GST_BUFFER_SIZE (buf);

  GST_LOG_OBJECT (enc, "Received buffer of size %d", GST_BUFFER_SIZE (buf));

  g_return_val_if_fail (size > 0, GST_FLOW_ERROR);
  g_return_val_if_fail (size % 640 == 0, GST_FLOW_ERROR);

  /* we need to process 640 input bytes to produce 40 output bytes */
  /* calculate the amount of frames we will handle */
  num_frames = size / 640;

  /* this is the input/output size */
  in_size = num_frames * 640;
  out_size = num_frames * 40;

  GST_LOG_OBJECT (enc, "we have %u frames, %u in, %u out", num_frames, in_size,
      out_size);

  /* get a buffer */
  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (benc),
      -1, out_size, GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (benc)), &out_buf);
  if (ret != GST_FLOW_OK)
    goto alloc_failed;

  /* get the input data for all the frames */
  in_data = GST_BUFFER_DATA (buf);
  out_data = GST_BUFFER_DATA (out_buf);

  for (i = 0; i < num_frames; i++) {
    GST_LOG_OBJECT (enc, "Encoding frame %u/%u", i, num_frames);

    /* encode 640 input bytes to 40 output bytes */
    encode_ret = Siren7_EncodeFrame (enc->encoder, in_data, out_data);
    if (encode_ret != 0)
      goto encode_error;

    /* move to next frame */
    out_data += 40;
    in_data += 640;
  }

  GST_LOG_OBJECT (enc, "Finished encoding");

  /* we encode all we get, pass it along */
  ret = gst_audio_encoder_finish_frame (benc, out_buf, -1);

done:
  return ret;

  /* ERRORS */
alloc_failed:
  {
    GST_DEBUG_OBJECT (enc, "failed to pad_alloc buffer: %d (%s)", ret,
        gst_flow_get_name (ret));
    goto done;
  }
encode_error:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("Error encoding frame: %d", encode_ret));
    ret = GST_FLOW_ERROR;
    gst_buffer_unref (out_buf);
    goto done;
  }
}
Code Example #17
static GstFlowReturn
gst_speex_enc_encode (GstSpeexEnc * enc, GstBuffer * buf)
{
  gint frame_size = enc->frame_size;
  gint bytes = frame_size * 2 * enc->channels, samples;
  gint outsize, written, dtx_ret = 0;
  GstMapInfo map;
  guint8 *data, *data0 = NULL, *bdata;
  gsize bsize, size;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;

  if (G_LIKELY (buf)) {
    gst_buffer_map (buf, &map, GST_MAP_READ);
    bdata = map.data;
    bsize = map.size;

    if (G_UNLIKELY (bsize % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");

      size = ((bsize / bytes) + 1) * bytes;
      data0 = data = g_malloc0 (size);
      memcpy (data, bdata, bsize);
      gst_buffer_unmap (buf, &map);
      bdata = NULL;
    } else {
      data = bdata;
      size = bsize;
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  samples = size / (2 * enc->channels);
  speex_bits_reset (&enc->bits);

  /* FIXME what about dropped samples if DTX enabled ?? */

  while (size) {
    GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)", frame_size, bytes);

    if (enc->channels == 2) {
      speex_encode_stereo_int ((gint16 *) data, frame_size, &enc->bits);
    }
    dtx_ret += speex_encode_int (enc->state, (gint16 *) data, &enc->bits);

    data += bytes;
    size -= bytes;
  }

  speex_bits_insert_terminator (&enc->bits);
  outsize = speex_bits_nbytes (&enc->bits);

  if (bdata)
    gst_buffer_unmap (buf, &map);

#if 0
  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc),
      GST_BUFFER_OFFSET_NONE, outsize,
      GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (enc)), &outbuf);

  if ((GST_FLOW_OK != ret))
    goto done;
#endif
  outbuf = gst_buffer_new_allocate (NULL, outsize, NULL);
  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);

  written = speex_bits_write (&enc->bits, (gchar *) map.data, outsize);

  if (G_UNLIKELY (written < outsize)) {
    GST_ERROR_OBJECT (enc, "short write: %d < %d bytes", written, outsize);
  } else if (G_UNLIKELY (written > outsize)) {
    GST_ERROR_OBJECT (enc, "overrun: %d > %d bytes", written, outsize);
    written = outsize;
  }
  gst_buffer_unmap (outbuf, &map);
  gst_buffer_resize (outbuf, 0, written);

  if (!dtx_ret)
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);

  ret = gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc),
      outbuf, samples);

done:
  g_free (data0);
  return ret;
}
Code Example #18
File: gstsbcenc.c Project: asrashley/gst-plugins-bad
static GstFlowReturn
gst_sbc_enc_handle_frame (GstAudioEncoder * audio_enc, GstBuffer * buffer)
{
  GstSbcEnc *enc = GST_SBC_ENC (audio_enc);
  GstMapInfo in_map, out_map;
  GstBuffer *outbuf = NULL;
  guint samples_per_frame, frames, i = 0;

  /* no fancy draining */
  if (buffer == NULL)
    return GST_FLOW_OK;

  if (G_UNLIKELY (enc->channels == 0 || enc->blocks == 0 || enc->subbands == 0))
    return GST_FLOW_NOT_NEGOTIATED;

  samples_per_frame = enc->channels * enc->blocks * enc->subbands;

  if (!gst_buffer_map (buffer, &in_map, GST_MAP_READ))
    goto map_failed;

  frames = in_map.size / (samples_per_frame * sizeof (gint16));

  GST_LOG_OBJECT (enc,
      "encoding %" G_GSIZE_FORMAT " samples into %u SBC frames",
      in_map.size / (enc->channels * sizeof (gint16)), frames);

  if (frames > 0) {
    gsize frame_len;

    frame_len = sbc_get_frame_length (&enc->sbc);
    outbuf = gst_audio_encoder_allocate_output_buffer (audio_enc,
        frames * frame_len);

    if (outbuf == NULL)
      goto no_buffer;

    gst_buffer_map (outbuf, &out_map, GST_MAP_WRITE);

    for (i = 0; i < frames; ++i) {
      gssize ret, written = 0;

      ret = sbc_encode (&enc->sbc, in_map.data + (i * samples_per_frame * 2),
          samples_per_frame * 2, out_map.data + (i * frame_len), frame_len,
          &written);

      if (ret < 0 || written != frame_len) {
        GST_WARNING_OBJECT (enc, "encoding error, ret = %" G_GSSIZE_FORMAT ", "
            "written = %" G_GSSIZE_FORMAT, ret, written);
        break;
      }
    }

    gst_buffer_unmap (outbuf, &out_map);

    if (i > 0)
      gst_buffer_set_size (outbuf, i * frame_len);
    else
      gst_buffer_replace (&outbuf, NULL);
  }

done:

  gst_buffer_unmap (buffer, &in_map);

  return gst_audio_encoder_finish_frame (audio_enc, outbuf,
      i * (samples_per_frame / enc->channels));

/* ERRORS */
no_buffer:
  {
    GST_ERROR_OBJECT (enc, "could not allocate output buffer");
    goto done;
  }
map_failed:
  {
    GST_ERROR_OBJECT (enc, "could not map input buffer");
    return GST_FLOW_ERROR;
  }
}
Code Example #19
File: gstomxaudioenc.c Project: 01org/gst-omx
static void
gst_omx_audio_enc_loop (GstOMXAudioEnc * self)
{
  GstOMXAudioEncClass *klass;
  GstOMXPort *port = self->enc_out_port;
  GstOMXBuffer *buf = NULL;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstOMXAcquireBufferReturn acq_return;
  OMX_ERRORTYPE err;

  klass = GST_OMX_AUDIO_ENC_GET_CLASS (self);

  acq_return = gst_omx_port_acquire_buffer (port, &buf);
  if (acq_return == GST_OMX_ACQUIRE_BUFFER_ERROR) {
    goto component_error;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_FLUSHING) {
    goto flushing;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_EOS) {
    goto eos;
  }

  if (!gst_pad_has_current_caps (GST_AUDIO_ENCODER_SRC_PAD (self))
      || acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
    GstAudioInfo *info =
        gst_audio_encoder_get_audio_info (GST_AUDIO_ENCODER (self));
    GstCaps *caps;

    GST_DEBUG_OBJECT (self, "Port settings have changed, updating caps");

    /* Reallocate all buffers */
    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
      err = gst_omx_port_set_enabled (port, FALSE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_buffers_released (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_deallocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 1 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

    }

    GST_AUDIO_ENCODER_STREAM_LOCK (self);

    caps = klass->get_caps (self, self->enc_out_port, info);
    if (!caps) {
      if (buf)
        gst_omx_port_release_buffer (self->enc_out_port, buf);
      GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
      goto caps_failed;
    }

    GST_DEBUG_OBJECT (self, "Setting output caps: %" GST_PTR_FORMAT, caps);

    if (!gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (self), caps)) {
      gst_caps_unref (caps);
      if (buf)
        gst_omx_port_release_buffer (self->enc_out_port, buf);
      GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
      goto caps_failed;
    }
    gst_caps_unref (caps);

    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);

    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
      err = gst_omx_port_set_enabled (port, TRUE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_allocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_populate (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_mark_reconfigured (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    /* Now get a buffer */
    if (acq_return != GST_OMX_ACQUIRE_BUFFER_OK) {
      return;
    }
  }

  g_assert (acq_return == GST_OMX_ACQUIRE_BUFFER_OK);
  if (!buf) {
    g_assert ((klass->cdata.hacks & GST_OMX_HACK_NO_EMPTY_EOS_BUFFER));
    GST_AUDIO_ENCODER_STREAM_LOCK (self);
    goto eos;
  }

  GST_DEBUG_OBJECT (self, "Handling buffer: 0x%08x %" G_GUINT64_FORMAT,
      (guint) buf->omx_buf->nFlags, (guint64) buf->omx_buf->nTimeStamp);

  /* This prevents a deadlock between the srcpad stream
   * lock and the videocodec stream lock, if ::reset()
   * is called at the wrong time
   */
  if (gst_omx_port_is_flushing (self->enc_out_port)) {
    GST_DEBUG_OBJECT (self, "Flushing");
    gst_omx_port_release_buffer (self->enc_out_port, buf);
    goto flushing;
  }

  GST_AUDIO_ENCODER_STREAM_LOCK (self);

  if ((buf->omx_buf->nFlags & OMX_BUFFERFLAG_CODECCONFIG)
      && buf->omx_buf->nFilledLen > 0) {
    GstCaps *caps;
    GstBuffer *codec_data;
    GstMapInfo map = GST_MAP_INFO_INIT;

    GST_DEBUG_OBJECT (self, "Handling codec data");
    caps =
        gst_caps_copy (gst_pad_get_current_caps (GST_AUDIO_ENCODER_SRC_PAD
            (self)));
    codec_data = gst_buffer_new_and_alloc (buf->omx_buf->nFilledLen);

    gst_buffer_map (codec_data, &map, GST_MAP_WRITE);
    memcpy (map.data,
        buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
        buf->omx_buf->nFilledLen);
    gst_buffer_unmap (codec_data, &map);

    gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, codec_data, NULL);
    if (!gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (self), caps)) {
      gst_caps_unref (caps);
      if (buf)
        gst_omx_port_release_buffer (self->enc_out_port, buf);
      GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
      goto caps_failed;
    }
    gst_caps_unref (caps);
    flow_ret = GST_FLOW_OK;
  } else if (buf->omx_buf->nFilledLen > 0) {
    GstBuffer *outbuf;
    guint n_samples;

    GST_DEBUG_OBJECT (self, "Handling output data");

    n_samples =
        klass->get_num_samples (self, self->enc_out_port,
        gst_audio_encoder_get_audio_info (GST_AUDIO_ENCODER (self)), buf);

    if (buf->omx_buf->nFilledLen > 0) {
      GstMapInfo map = GST_MAP_INFO_INIT;
      outbuf = gst_buffer_new_and_alloc (buf->omx_buf->nFilledLen);

      gst_buffer_map (outbuf, &map, GST_MAP_WRITE);

      memcpy (map.data,
          buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
          buf->omx_buf->nFilledLen);
      gst_buffer_unmap (outbuf, &map);

    } else {
      outbuf = gst_buffer_new ();
    }

    GST_BUFFER_TIMESTAMP (outbuf) =
        gst_util_uint64_scale (buf->omx_buf->nTimeStamp, GST_SECOND,
        OMX_TICKS_PER_SECOND);
    if (buf->omx_buf->nTickCount != 0)
      GST_BUFFER_DURATION (outbuf) =
          gst_util_uint64_scale (buf->omx_buf->nTickCount, GST_SECOND,
          OMX_TICKS_PER_SECOND);

    flow_ret =
        gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (self),
        outbuf, n_samples);
  }

  GST_DEBUG_OBJECT (self, "Handled output data");

  GST_DEBUG_OBJECT (self, "Finished frame: %s", gst_flow_get_name (flow_ret));

  err = gst_omx_port_release_buffer (port, buf);
  if (err != OMX_ErrorNone)
    goto release_error;

  self->downstream_flow_ret = flow_ret;

  if (flow_ret != GST_FLOW_OK)
    goto flow_error;

  GST_AUDIO_ENCODER_STREAM_UNLOCK (self);

  return;

component_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("OpenMAX component in error state %s (0x%08x)",
            gst_omx_component_get_last_error_string (self->enc),
            gst_omx_component_get_last_error (self->enc)));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    return;
  }
flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_FLUSHING;
    self->started = FALSE;
    return;
  }
eos:
  {
    g_mutex_lock (&self->drain_lock);
    if (self->draining) {
      GST_DEBUG_OBJECT (self, "Drained");
      self->draining = FALSE;
      g_cond_broadcast (&self->drain_cond);
      flow_ret = GST_FLOW_OK;
      gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    } else {
      GST_DEBUG_OBJECT (self, "Component signalled EOS");
      flow_ret = GST_FLOW_EOS;
    }
    g_mutex_unlock (&self->drain_lock);

    GST_AUDIO_ENCODER_STREAM_LOCK (self);
    self->downstream_flow_ret = flow_ret;

    /* Here we fallback and pause the task for the EOS case */
    if (flow_ret != GST_FLOW_OK)
      goto flow_error;

    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);

    return;
  }
flow_error:
  {
    if (flow_ret == GST_FLOW_EOS) {
      GST_DEBUG_OBJECT (self, "EOS");

      gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    } else if (flow_ret == GST_FLOW_NOT_LINKED || flow_ret < GST_FLOW_EOS) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, ("Internal data stream error."),
          ("stream stopped, reason %s", gst_flow_get_name (flow_ret)));

      gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    }
    self->started = FALSE;
    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
    return;
  }
reconfigure_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Unable to reconfigure output port"));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    return;
  }
caps_failed:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL), ("Failed to set caps"));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    return;
  }
release_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to relase output buffer to component: %s (0x%08x)",
            gst_omx_error_to_string (err), err));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
    return;
  }
}
Code Example #20
static GstFlowReturn
gst_siren_enc_handle_frame (GstAudioEncoder * benc, GstBuffer * buf)
{
  GstSirenEnc *enc;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *out_buf;
  guint8 *in_data, *out_data;
  guint i, size, num_frames;
  gint out_size;
#ifndef GST_DISABLE_GST_DEBUG
  gint in_size;
#endif
  gint encode_ret;
  GstMapInfo inmap, outmap;

  g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);

  enc = GST_SIREN_ENC (benc);

  size = gst_buffer_get_size (buf);

  GST_LOG_OBJECT (enc, "Received buffer of size %d", size);

  g_return_val_if_fail (size > 0, GST_FLOW_ERROR);
  g_return_val_if_fail (size % 640 == 0, GST_FLOW_ERROR);

  /* we need to process 640 input bytes to produce 40 output bytes */
  /* calculate the amount of frames we will handle */
  num_frames = size / 640;

  /* this is the input/output size */
#ifndef GST_DISABLE_GST_DEBUG
  in_size = num_frames * 640;
#endif
  out_size = num_frames * 40;

  GST_LOG_OBJECT (enc, "we have %u frames, %u in, %u out", num_frames, in_size,
      out_size);

  /* get a buffer */
  out_buf = gst_audio_encoder_allocate_output_buffer (benc, out_size);
  if (out_buf == NULL)
    goto alloc_failed;

  /* get the input data for all the frames */
  gst_buffer_map (buf, &inmap, GST_MAP_READ);
  gst_buffer_map (out_buf, &outmap, GST_MAP_WRITE);
  in_data = inmap.data;
  out_data = outmap.data;

  for (i = 0; i < num_frames; i++) {
    GST_LOG_OBJECT (enc, "Encoding frame %u/%u", i, num_frames);

    /* encode 640 input bytes to 40 output bytes */
    encode_ret = Siren7_EncodeFrame (enc->encoder, in_data, out_data);
    if (encode_ret != 0)
      goto encode_error;

    /* move to next frame */
    out_data += 40;
    in_data += 640;
  }

  gst_buffer_unmap (buf, &inmap);
  gst_buffer_unmap (out_buf, &outmap);

  GST_LOG_OBJECT (enc, "Finished encoding");

  /* we encode all we get, pass it along */
  ret = gst_audio_encoder_finish_frame (benc, out_buf, -1);

done:
  return ret;

  /* ERRORS */
alloc_failed:
  {
    GST_DEBUG_OBJECT (enc, "failed to pad_alloc buffer: %d (%s)", ret,
        gst_flow_get_name (ret));
    goto done;
  }
encode_error:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("Error encoding frame: %d", encode_ret));
    ret = GST_FLOW_ERROR;
    gst_buffer_unref (out_buf);
    goto done;
  }
}
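
Note: several of the encoders above (for example the twolame, FDK-AAC and libav ones) also handle the NULL-buffer drain case by flushing whatever the codec still buffers and pushing the result with a sample count of -1. Below is a minimal sketch of that drain path only, again using the hypothetical MY_MAX_PACKET_SIZE and a hypothetical my_codec_flush(); it is not taken from any plugin shown here.

static GstFlowReturn
my_enc_drain (GstAudioEncoder * enc)
{
  GstMapInfo map;
  GstBuffer *outbuf;
  gssize size;

  outbuf = gst_audio_encoder_allocate_output_buffer (enc, MY_MAX_PACKET_SIZE);
  if (outbuf == NULL)
    return GST_FLOW_ERROR;

  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
  /* hypothetical: write any pending encoded bytes, return how many */
  size = my_codec_flush (map.data, map.size);
  gst_buffer_unmap (outbuf, &map);

  if (size <= 0) {
    /* nothing left in the codec */
    gst_buffer_unref (outbuf);
    return GST_FLOW_OK;
  }

  gst_buffer_set_size (outbuf, size);
  /* -1: this packet accounts for all remaining queued samples */
  return gst_audio_encoder_finish_frame (enc, outbuf, -1);
}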