Example #1
static void
gst_omx_audio_enc_flush (GstAudioEncoder * encoder)
{
  GstOMXAudioEnc *self;

  self = GST_OMX_AUDIO_ENC (encoder);

  GST_DEBUG_OBJECT (self, "Resetting encoder");

  gst_omx_audio_enc_drain (self);

  gst_omx_port_set_flushing (self->enc_in_port, 5 * GST_SECOND, TRUE);
  gst_omx_port_set_flushing (self->enc_out_port, 5 * GST_SECOND, TRUE);

  /* Wait until the srcpad loop is finished */
  GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
  GST_PAD_STREAM_LOCK (GST_AUDIO_ENCODER_SRC_PAD (self));
  GST_PAD_STREAM_UNLOCK (GST_AUDIO_ENCODER_SRC_PAD (self));
  GST_AUDIO_ENCODER_STREAM_LOCK (self);

  gst_omx_port_set_flushing (self->enc_in_port, 5 * GST_SECOND, FALSE);
  gst_omx_port_set_flushing (self->enc_out_port, 5 * GST_SECOND, FALSE);
  gst_omx_port_populate (self->enc_out_port);

  /* Start the srcpad loop again */
  self->last_upstream_ts = 0;
  self->downstream_flow_ret = GST_FLOW_OK;
  self->eos = FALSE;
  gst_pad_start_task (GST_AUDIO_ENCODER_SRC_PAD (self),
      (GstTaskFunction) gst_omx_audio_enc_loop, encoder, NULL);
}
Example #2
/* push out the buffer and do internal bookkeeping */
static GstFlowReturn
gst_vorbis_enc_push_header (GstVorbisEnc * vorbisenc, GstBuffer * buffer)
{
  GST_DEBUG_OBJECT (vorbisenc,
      "Pushing buffer with GP %" G_GINT64_FORMAT ", ts %" GST_TIME_FORMAT,
      GST_BUFFER_OFFSET_END (buffer),
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
  gst_buffer_set_caps (buffer,
      GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (vorbisenc)));
  return gst_pad_push (GST_AUDIO_ENCODER_SRC_PAD (vorbisenc), buffer);
}
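The header buffers that Example #12 below stores "for later pre_push sending" would typically be flushed through this helper from the encoder's pre-push hook. A minimal sketch, assuming a "headers" GSList field on GstVorbisEnc and a hypothetical helper name:

/* Sketch only: push the queued header buffers in order before the first
 * data buffer leaves the element; gst_pad_push () inside push_header takes
 * ownership of each buffer, so the list is freed without unreffing. */
static GstFlowReturn
gst_vorbis_enc_push_pending_headers (GstVorbisEnc * vorbisenc)
{
  GSList *walk;
  GstFlowReturn ret = GST_FLOW_OK;

  for (walk = vorbisenc->headers; walk; walk = g_slist_next (walk)) {
    ret = gst_vorbis_enc_push_header (vorbisenc, GST_BUFFER (walk->data));
    if (ret != GST_FLOW_OK)
      break;
  }
  g_slist_free (vorbisenc->headers);
  vorbisenc->headers = NULL;

  return ret;
}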
Example #3
/* push out the buffer */
static GstFlowReturn
gst_celt_enc_push_buffer (GstCeltEnc * enc, GstBuffer * buffer)
{
  guint size;

  size = GST_BUFFER_SIZE (buffer);
  GST_DEBUG_OBJECT (enc, "pushing output buffer of size %u", size);

  gst_buffer_set_caps (buffer, GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (enc)));
  return gst_pad_push (GST_AUDIO_ENCODER_SRC_PAD (enc), buffer);
}
Example #4
static gboolean
gst_mulawenc_set_format (GstAudioEncoder * audioenc, GstAudioInfo * info)
{
  GstCaps *base_caps;
  GstStructure *structure;
  GstMuLawEnc *mulawenc = GST_MULAWENC (audioenc);
  gboolean ret;

  mulawenc->rate = info->rate;
  mulawenc->channels = info->channels;

  base_caps =
      gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SRC_PAD (audioenc));
  g_assert (base_caps);
  base_caps = gst_caps_make_writable (base_caps);
  g_assert (base_caps);

  structure = gst_caps_get_structure (base_caps, 0);
  g_assert (structure);
  gst_structure_set (structure, "rate", G_TYPE_INT, mulawenc->rate, NULL);
  gst_structure_set (structure, "channels", G_TYPE_INT, mulawenc->channels,
      NULL);

  gst_mulawenc_set_tags (mulawenc);

  ret = gst_audio_encoder_set_output_format (audioenc, base_caps);
  gst_caps_unref (base_caps);

  return ret;
}
Example #5
static gboolean
gst_omx_audio_enc_stop (GstAudioEncoder * encoder)
{
  GstOMXAudioEnc *self;

  self = GST_OMX_AUDIO_ENC (encoder);

  GST_DEBUG_OBJECT (self, "Stopping encoder");

  gst_omx_port_set_flushing (self->enc_in_port, 5 * GST_SECOND, TRUE);
  gst_omx_port_set_flushing (self->enc_out_port, 5 * GST_SECOND, TRUE);

  gst_pad_stop_task (GST_AUDIO_ENCODER_SRC_PAD (encoder));

  if (gst_omx_component_get_state (self->enc, 0) > OMX_StateIdle)
    gst_omx_component_set_state (self->enc, OMX_StateIdle);

  self->downstream_flow_ret = GST_FLOW_FLUSHING;
  self->started = FALSE;
  self->eos = FALSE;

  g_mutex_lock (&self->drain_lock);
  self->draining = FALSE;
  g_cond_broadcast (&self->drain_cond);
  g_mutex_unlock (&self->drain_lock);

  gst_omx_component_get_state (self->enc, 5 * GST_SECOND);

  return TRUE;
}
Example #6
static GstFlowReturn
gst_vorbis_enc_output_buffers (GstVorbisEnc * vorbisenc)
{
  GstFlowReturn ret;

  /* vorbis does some data preanalysis, then divides up blocks for
     more involved (potentially parallel) processing.  Get a single
     block for encoding now */
  while (vorbis_analysis_blockout (&vorbisenc->vd, &vorbisenc->vb) == 1) {
    ogg_packet op;

    GST_LOG_OBJECT (vorbisenc, "analysed to a block");

    /* analysis */
    vorbis_analysis (&vorbisenc->vb, NULL);
    vorbis_bitrate_addblock (&vorbisenc->vb);

    while (vorbis_bitrate_flushpacket (&vorbisenc->vd, &op)) {
      GstBuffer *buf;

      if (op.e_o_s) {
        GstAudioEncoder *enc = GST_AUDIO_ENCODER (vorbisenc);
        GstClockTime duration;

        GST_DEBUG_OBJECT (vorbisenc, "Got EOS packet from libvorbis");
        GST_AUDIO_ENCODER_STREAM_LOCK (enc);
        if (!GST_CLOCK_TIME_IS_VALID (enc->output_segment.stop)) {
          GST_DEBUG_OBJECT (vorbisenc,
              "Output segment has no end time, setting");
          duration =
              gst_util_uint64_scale (op.granulepos, GST_SECOND,
              vorbisenc->frequency);
          enc->output_segment.stop = enc->output_segment.start + duration;
          GST_DEBUG_OBJECT (enc, "new output segment %" GST_SEGMENT_FORMAT,
              &enc->output_segment);
          gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (enc),
              gst_event_new_segment (&enc->output_segment));
        }
        GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
      }

      GST_LOG_OBJECT (vorbisenc, "pushing out a data packet");
      buf =
          gst_audio_encoder_allocate_output_buffer (GST_AUDIO_ENCODER
          (vorbisenc), op.bytes);
      gst_buffer_fill (buf, 0, op.packet, op.bytes);
      /* tracking granulepos should tell us samples accounted for */
      ret =
          gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER
          (vorbisenc), buf, op.granulepos - vorbisenc->samples_out);
      vorbisenc->samples_out = op.granulepos;

      if (ret != GST_FLOW_OK)
        return ret;
    }
  }

  return GST_FLOW_OK;
}
Example #7
/* push out the buffer */
static GstFlowReturn
gst_speex_enc_push_buffer (GstSpeexEnc * enc, GstBuffer * buffer)
{
  guint size;

  size = GST_BUFFER_SIZE (buffer);
  GST_DEBUG_OBJECT (enc, "pushing output buffer of size %u", size);

  return gst_pad_push (GST_AUDIO_ENCODER_SRC_PAD (enc), buffer);
}
Example #8
static gboolean
gst_celt_enc_set_format (GstAudioEncoder * benc, GstAudioInfo * info)
{
  GstCeltEnc *enc;
  GstCaps *otherpadcaps;

  enc = GST_CELT_ENC (benc);

  enc->channels = GST_AUDIO_INFO_CHANNELS (info);
  enc->rate = GST_AUDIO_INFO_RATE (info);

  /* handle reconfigure */
  if (enc->state) {
    celt_encoder_destroy (enc->state);
    enc->state = NULL;
  }
  if (enc->mode) {
    celt_mode_destroy (enc->mode);
    enc->mode = NULL;
  }
  memset (&enc->header, 0, sizeof (enc->header));

  otherpadcaps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (enc));
  if (otherpadcaps) {
    if (!gst_caps_is_empty (otherpadcaps)) {
      GstStructure *ps = gst_caps_get_structure (otherpadcaps, 0);
      gst_structure_get_int (ps, "frame-size", &enc->frame_size);
    }
    gst_caps_unref (otherpadcaps);
  }

  if (enc->requested_frame_size > 0)
    enc->frame_size = enc->requested_frame_size;

  GST_DEBUG_OBJECT (enc, "channels=%d rate=%d frame-size=%d",
      enc->channels, enc->rate, enc->frame_size);

  if (!gst_celt_enc_setup (enc))
    return FALSE;

  /* feedback to base class */
  gst_audio_encoder_set_latency (benc,
      gst_celt_enc_get_latency (enc), gst_celt_enc_get_latency (enc));
  gst_audio_encoder_set_frame_samples_min (benc, enc->frame_size);
  gst_audio_encoder_set_frame_samples_max (benc, enc->frame_size);
  gst_audio_encoder_set_frame_max (benc, 1);

  return TRUE;
}
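The latency fed back to the base class above comes from a helper that is not part of this excerpt. A plausible sketch, assuming the reported latency is simply one frame worth of samples at the configured rate:

/* Sketch only: report one frame of samples, converted to GstClockTime. */
static GstClockTime
gst_celt_enc_get_latency (GstCeltEnc * enc)
{
  return gst_util_uint64_scale (enc->frame_size, GST_SECOND, enc->rate);
}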
Example #9
static gboolean
gst_siren_enc_set_format (GstAudioEncoder * benc, GstAudioInfo * info)
{
  GstSirenEnc *enc;
  gboolean res;
  GstCaps *outcaps;

  enc = GST_SIREN_ENC (benc);

  outcaps = gst_static_pad_template_get_caps (&srctemplate);
  res = gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc), outcaps);
  gst_caps_unref (outcaps);

  /* report needs to base class */
  gst_audio_encoder_set_frame_samples_min (benc, 320);
  gst_audio_encoder_set_frame_samples_max (benc, 320);
  /* no remainder or flushing please */
  gst_audio_encoder_set_hard_min (benc, TRUE);
  gst_audio_encoder_set_drainable (benc, FALSE);

  return res;
}
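The srctemplate referenced above is likewise outside the excerpt. A sketch of what such a static source pad template could look like; the exact caps string is an assumption:

/* Illustrative only: static source pad template for the Siren encoder. */
static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("audio/x-siren, dct-length = (int) 320"));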
Example #10
static GstFlowReturn
gst_opus_enc_handle_frame (GstAudioEncoder * benc, GstBuffer * buf)
{
  GstOpusEnc *enc;
  GstFlowReturn ret = GST_FLOW_OK;

  enc = GST_OPUS_ENC (benc);
  GST_DEBUG_OBJECT (enc, "handle_frame");

  if (!enc->header_sent) {
    GstCaps *caps;

    g_slist_foreach (enc->headers, (GFunc) gst_buffer_unref, NULL);
    g_slist_free (enc->headers);
    enc->headers = NULL;

    gst_opus_header_create_caps (&caps, &enc->headers, enc->n_channels,
        enc->n_stereo_streams, enc->sample_rate, enc->channel_mapping_family,
        enc->decoding_channel_mapping,
        gst_tag_setter_get_tag_list (GST_TAG_SETTER (enc)));


    /* negotiate with these caps */
    GST_DEBUG_OBJECT (enc, "here are the caps: %" GST_PTR_FORMAT, caps);

    gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc), caps);
    gst_caps_unref (caps);

    enc->header_sent = TRUE;
  }

  GST_DEBUG_OBJECT (enc, "received buffer %p of %u bytes", buf,
      buf ? GST_BUFFER_SIZE (buf) : 0);

  ret = gst_opus_enc_encode (enc, buf);

  return ret;
}
Example #11
/* set up the encoder state */
static gboolean
gst_two_lame_setup (GstTwoLame * twolame)
{

#define CHECK_ERROR(command) G_STMT_START {\
  if ((command) < 0) { \
    GST_ERROR_OBJECT (twolame, "setup failed: " G_STRINGIFY (command)); \
    return FALSE; \
  } \
}G_STMT_END

  int retval;
  GstCaps *allowed_caps;

  GST_DEBUG_OBJECT (twolame, "starting setup");

  /* check if we're already setup; if we are, we might want to check
   * if this initialization is compatible with the previous one */
  /* FIXME: do this */
  if (twolame->setup) {
    GST_WARNING_OBJECT (twolame, "already setup");
    twolame->setup = FALSE;
  }

  twolame->glopts = twolame_init ();

  if (twolame->glopts == NULL)
    return FALSE;

  /* copy the parameters over */
  twolame_set_in_samplerate (twolame->glopts, twolame->samplerate);

  /* let twolame choose default samplerate unless outgoing sample rate is fixed */
  allowed_caps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (twolame));

  if (allowed_caps != NULL) {
    GstStructure *structure;
    gint samplerate;

    structure = gst_caps_get_structure (allowed_caps, 0);

    if (gst_structure_get_int (structure, "rate", &samplerate)) {
      GST_DEBUG_OBJECT (twolame,
          "Setting sample rate to %d as fixed in src caps", samplerate);
      twolame_set_out_samplerate (twolame->glopts, samplerate);
    } else {
      GST_DEBUG_OBJECT (twolame, "Letting twolame choose sample rate");
      twolame_set_out_samplerate (twolame->glopts, 0);
    }
    gst_caps_unref (allowed_caps);
    allowed_caps = NULL;
  } else {
    GST_DEBUG_OBJECT (twolame,
        "No peer yet, letting twolame choose sample rate");
    twolame_set_out_samplerate (twolame->glopts, 0);
  }

  /* force mono encoding if we only have one channel */
  if (twolame->num_channels == 1)
    twolame->mode = 3;

  /* Fix bitrates and MPEG version */

  CHECK_ERROR (twolame_set_num_channels (twolame->glopts,
          twolame->num_channels));

  CHECK_ERROR (twolame_set_mode (twolame->glopts, twolame->mode));
  CHECK_ERROR (twolame_set_psymodel (twolame->glopts, twolame->psymodel));
  CHECK_AND_FIXUP_BITRATE (twolame, "bitrate", twolame->bitrate);
  CHECK_ERROR (twolame_set_bitrate (twolame->glopts, twolame->bitrate));
  CHECK_ERROR (twolame_set_padding (twolame->glopts, twolame->padding));
  CHECK_ERROR (twolame_set_energy_levels (twolame->glopts,
          twolame->energy_level_extension));
  CHECK_ERROR (twolame_set_emphasis (twolame->glopts, twolame->emphasis));
  CHECK_ERROR (twolame_set_error_protection (twolame->glopts,
          twolame->error_protection));
  CHECK_ERROR (twolame_set_copyright (twolame->glopts, twolame->copyright));
  CHECK_ERROR (twolame_set_original (twolame->glopts, twolame->original));
  CHECK_ERROR (twolame_set_VBR (twolame->glopts, twolame->vbr));
  CHECK_ERROR (twolame_set_VBR_level (twolame->glopts, twolame->vbr_level));
  CHECK_ERROR (twolame_set_ATH_level (twolame->glopts, twolame->ath_level));
  CHECK_AND_FIXUP_BITRATE (twolame, "vbr-max-bitrate",
      twolame->vbr_max_bitrate);
  CHECK_ERROR (twolame_set_VBR_max_bitrate_kbps (twolame->glopts,
          twolame->vbr_max_bitrate));
  CHECK_ERROR (twolame_set_quick_mode (twolame->glopts, twolame->quick_mode));
  CHECK_ERROR (twolame_set_quick_count (twolame->glopts,
          twolame->quick_mode_count));

  /* initialize the twolame encoder */
  if ((retval = twolame_init_params (twolame->glopts)) >= 0) {
    twolame->setup = TRUE;
    /* FIXME: it would be nice to print out the mode here */
    GST_INFO ("twolame encoder setup (%d kbit/s, %d Hz, %d channels)",
        twolame->bitrate, twolame->samplerate, twolame->num_channels);
  } else {
    GST_ERROR_OBJECT (twolame, "twolame_init_params returned %d", retval);
  }

  GST_DEBUG_OBJECT (twolame, "done with setup");

  return twolame->setup;
#undef CHECK_ERROR
}
Example #12
static GstFlowReturn
gst_vorbis_enc_handle_frame (GstAudioEncoder * enc, GstBuffer * buffer)
{
  GstVorbisEnc *vorbisenc;
  GstFlowReturn ret = GST_FLOW_OK;
  gfloat *data;
  gulong size;
  gulong i, j;
  float **vorbis_buffer;
  GstBuffer *buf1, *buf2, *buf3;

  vorbisenc = GST_VORBISENC (enc);

  if (G_UNLIKELY (!vorbisenc->setup)) {
    if (buffer) {
      GST_DEBUG_OBJECT (vorbisenc, "forcing setup");
      /* should not fail, as setup was done the same way before */
      if (!gst_vorbis_enc_setup (vorbisenc))
        return GST_FLOW_ERROR;
    } else {
      /* end draining */
      GST_LOG_OBJECT (vorbisenc, "already drained");
      return GST_FLOW_OK;
    }
  }

  if (!vorbisenc->header_sent) {
    /* Vorbis streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libvorbis one at a time;
       libvorbis handles the additional Ogg bitstream constraints */
    ogg_packet header;
    ogg_packet header_comm;
    ogg_packet header_code;
    GstCaps *caps;

    GST_DEBUG_OBJECT (vorbisenc, "creating and sending header packets");
    gst_vorbis_enc_set_metadata (vorbisenc);
    vorbis_analysis_headerout (&vorbisenc->vd, &vorbisenc->vc, &header,
        &header_comm, &header_code);
    vorbis_comment_clear (&vorbisenc->vc);

    /* create header buffers */
    buf1 = gst_vorbis_enc_buffer_from_header_packet (vorbisenc, &header);
    buf2 = gst_vorbis_enc_buffer_from_header_packet (vorbisenc, &header_comm);
    buf3 = gst_vorbis_enc_buffer_from_header_packet (vorbisenc, &header_code);

    /* mark and put on caps */
    caps = gst_caps_new_simple ("audio/x-vorbis", NULL);
    caps = _gst_caps_set_buffer_array (caps, "streamheader",
        buf1, buf2, buf3, NULL);

    /* negotiate with these caps */
    GST_DEBUG_OBJECT (vorbisenc, "here are the caps: %" GST_PTR_FORMAT, caps);

    gst_buffer_set_caps (buf1, caps);
    gst_buffer_set_caps (buf2, caps);
    gst_buffer_set_caps (buf3, caps);
    gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (vorbisenc), caps);
    gst_caps_unref (caps);

    /* store buffers for later pre_push sending */
    g_slist_foreach (vorbisenc->headers, (GFunc) gst_buffer_unref, NULL);
    vorbisenc->headers = NULL;
    GST_DEBUG_OBJECT (vorbisenc, "storing header buffers");
    vorbisenc->headers = g_slist_prepend (vorbisenc->headers, buf3);
    vorbisenc->headers = g_slist_prepend (vorbisenc->headers, buf2);
    vorbisenc->headers = g_slist_prepend (vorbisenc->headers, buf1);

    vorbisenc->header_sent = TRUE;
  }

  if (!buffer)
    return gst_vorbis_enc_clear (vorbisenc);

  /* data to encode */
  data = (gfloat *) GST_BUFFER_DATA (buffer);
  size = GST_BUFFER_SIZE (buffer) / (vorbisenc->channels * sizeof (float));

  /* expose the buffer to submit data */
  vorbis_buffer = vorbis_analysis_buffer (&vorbisenc->vd, size);

  /* deinterleave samples, write the buffer data */
  for (i = 0; i < size; i++) {
    for (j = 0; j < vorbisenc->channels; j++) {
      vorbis_buffer[j][i] = *data++;
    }
  }

  /* tell the library how much we actually submitted */
  vorbis_analysis_wrote (&vorbisenc->vd, size);

  GST_LOG_OBJECT (vorbisenc, "wrote %lu samples to vorbis", size);

  vorbisenc->samples_in += size;

  ret = gst_vorbis_enc_output_buffers (vorbisenc);

  return ret;
}
Example #13
static gboolean
gst_ffmpegaudenc_set_format (GstAudioEncoder * encoder, GstAudioInfo * info)
{
    GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
    GstCaps *other_caps;
    GstCaps *allowed_caps;
    GstCaps *icaps;
    gsize frame_size;
    GstFFMpegAudEncClass *oclass =
        (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);

    /* close old session */
    if (ffmpegaudenc->opened) {
        gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
        ffmpegaudenc->opened = FALSE;
        if (avcodec_get_context_defaults3 (ffmpegaudenc->context,
                                           oclass->in_plugin) < 0) {
            GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to set context defaults");
            return FALSE;
        }
    }

    /* if we set it in _getcaps we should set it also in _link */
    ffmpegaudenc->context->strict_std_compliance = ffmpegaudenc->compliance;

    /* user defined properties */
    if (ffmpegaudenc->bitrate > 0) {
        GST_INFO_OBJECT (ffmpegaudenc, "Setting avcontext to bitrate %d",
                         ffmpegaudenc->bitrate);
        ffmpegaudenc->context->bit_rate = ffmpegaudenc->bitrate;
        ffmpegaudenc->context->bit_rate_tolerance = ffmpegaudenc->bitrate;
    } else {
        GST_INFO_OBJECT (ffmpegaudenc,
                         "Using avcontext default bitrate %" G_GINT64_FORMAT,
                         (gint64) ffmpegaudenc->context->bit_rate);
    }

    /* RTP payload used for GOB production (for Asterisk) */
    if (ffmpegaudenc->rtp_payload_size) {
        ffmpegaudenc->context->rtp_payload_size = ffmpegaudenc->rtp_payload_size;
    }

    /* some other defaults */
    ffmpegaudenc->context->rc_strategy = 2;
    ffmpegaudenc->context->b_frame_strategy = 0;
    ffmpegaudenc->context->coder_type = 0;
    ffmpegaudenc->context->context_model = 0;
    ffmpegaudenc->context->scenechange_threshold = 0;

    /* fetch pix_fmt and so on */
    gst_ffmpeg_audioinfo_to_context (info, ffmpegaudenc->context);
    if (!ffmpegaudenc->context->time_base.den) {
        ffmpegaudenc->context->time_base.den = GST_AUDIO_INFO_RATE (info);
        ffmpegaudenc->context->time_base.num = 1;
        ffmpegaudenc->context->ticks_per_frame = 1;
    }

    if (ffmpegaudenc->context->channel_layout) {
        gst_ffmpeg_channel_layout_to_gst (ffmpegaudenc->context->channel_layout,
                                          ffmpegaudenc->context->channels, ffmpegaudenc->ffmpeg_layout);
        ffmpegaudenc->needs_reorder =
            (memcmp (ffmpegaudenc->ffmpeg_layout, info->position,
                     sizeof (GstAudioChannelPosition) *
                     ffmpegaudenc->context->channels) != 0);
    }

    /* some codecs support more than one format, first auto-choose one */
    GST_DEBUG_OBJECT (ffmpegaudenc, "picking an output format ...");
    allowed_caps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
    if (!allowed_caps) {
        GST_DEBUG_OBJECT (ffmpegaudenc, "... but no peer, using template caps");
        /* we need to copy because get_allowed_caps returns a ref, and
         * get_pad_template_caps doesn't */
        allowed_caps =
            gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
    }
    GST_DEBUG_OBJECT (ffmpegaudenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
    gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
                                  oclass->in_plugin->type, allowed_caps, ffmpegaudenc->context);

    /* open codec */
    if (gst_ffmpeg_avcodec_open (ffmpegaudenc->context, oclass->in_plugin) < 0) {
        gst_caps_unref (allowed_caps);
        gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
        GST_DEBUG_OBJECT (ffmpegaudenc, "avenc_%s: Failed to open FFMPEG codec",
                          oclass->in_plugin->name);
        if (avcodec_get_context_defaults3 (ffmpegaudenc->context,
                                           oclass->in_plugin) < 0)
            GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to set context defaults");

        if ((oclass->in_plugin->capabilities & CODEC_CAP_EXPERIMENTAL) &&
                ffmpegaudenc->compliance != GST_FFMPEG_EXPERIMENTAL) {
            GST_ELEMENT_ERROR (ffmpegaudenc, LIBRARY, SETTINGS,
                               ("Codec is experimental, but settings don't allow encoders to "
                                "produce output of experimental quality"),
                               ("This codec may not create output that is conformant to the specs "
                                "or of good quality. If you must use it anyway, set the "
                                "compliance property to experimental"));
        }
        return FALSE;
    }

    /* try to set this caps on the other side */
    other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
                 ffmpegaudenc->context, TRUE);

    if (!other_caps) {
        gst_caps_unref (allowed_caps);
        gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
        GST_DEBUG ("Unsupported codec - no caps found");
        if (avcodec_get_context_defaults3 (ffmpegaudenc->context,
                                           oclass->in_plugin) < 0)
            GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to set context defaults");
        return FALSE;
    }

    icaps = gst_caps_intersect (allowed_caps, other_caps);
    gst_caps_unref (allowed_caps);
    gst_caps_unref (other_caps);
    if (gst_caps_is_empty (icaps)) {
        gst_caps_unref (icaps);
        return FALSE;
    }
    icaps = gst_caps_fixate (icaps);

    if (!gst_audio_encoder_set_output_format (GST_AUDIO_ENCODER (ffmpegaudenc),
            icaps)) {
        gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
        gst_caps_unref (icaps);
        if (avcodec_get_context_defaults3 (ffmpegaudenc->context,
                                           oclass->in_plugin) < 0)
            GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to set context defaults");
        return FALSE;
    }
    gst_caps_unref (icaps);

    frame_size = ffmpegaudenc->context->frame_size;
    if (frame_size > 1) {
        gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
                frame_size);
        gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
                frame_size);
        gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 1);
    } else {
        gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
                0);
        gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
                0);
        gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 0);
    }

    /* Store some tags */
    {
        GstTagList *tags = gst_tag_list_new_empty ();
        const gchar *codec;

        gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE, GST_TAG_NOMINAL_BITRATE,
                          (guint) ffmpegaudenc->context->bit_rate, NULL);

        if ((codec =
                    gst_ffmpeg_get_codecid_longname (ffmpegaudenc->context->codec_id)))
            gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE, GST_TAG_AUDIO_CODEC, codec,
                              NULL);

        gst_audio_encoder_merge_tags (encoder, tags, GST_TAG_MERGE_REPLACE);
        gst_tag_list_unref (tags);
    }

    /* success! */
    ffmpegaudenc->opened = TRUE;

    return TRUE;
}
Example #14
static gboolean
gst_fdkaacenc_set_format (GstAudioEncoder * enc, GstAudioInfo * info)
{
  GstFdkAacEnc *self = GST_FDKAACENC (enc);
  gboolean ret = FALSE;
  GstCaps *allowed_caps;
  GstCaps *src_caps;
  AACENC_ERROR err;
  gint transmux = 0, aot = AOT_AAC_LC;
  gint mpegversion = 4;
  CHANNEL_MODE channel_mode;
  AACENC_InfoStruct enc_info = { 0 };
  gint bitrate;

  if (self->enc) {
    /* drain */
    gst_fdkaacenc_handle_frame (enc, NULL);
    aacEncClose (&self->enc);
  }

  allowed_caps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (self));

  GST_DEBUG_OBJECT (self, "allowed caps: %" GST_PTR_FORMAT, allowed_caps);

  if (allowed_caps && gst_caps_get_size (allowed_caps) > 0) {
    GstStructure *s = gst_caps_get_structure (allowed_caps, 0);
    const gchar *str = NULL;

    if ((str = gst_structure_get_string (s, "stream-format"))) {
      if (strcmp (str, "adts") == 0) {
        GST_DEBUG_OBJECT (self, "use ADTS format for output");
        transmux = 2;
      } else if (strcmp (str, "adif") == 0) {
        GST_DEBUG_OBJECT (self, "use ADIF format for output");
        transmux = 1;
      } else if (strcmp (str, "raw") == 0) {
        GST_DEBUG_OBJECT (self, "use RAW format for output");
        transmux = 0;
      }
    }

    gst_structure_get_int (s, "mpegversion", &mpegversion);
  }
  if (allowed_caps)
    gst_caps_unref (allowed_caps);

  err = aacEncOpen (&self->enc, 0, GST_AUDIO_INFO_CHANNELS (info));
  if (err != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Unable to open encoder: %d\n", err);
    return FALSE;
  }

  aot = AOT_AAC_LC;

  if ((err = aacEncoder_SetParam (self->enc, AACENC_AOT, aot)) != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Unable to set AOT %d: %d\n", aot, err);
    return FALSE;
  }

  if ((err = aacEncoder_SetParam (self->enc, AACENC_SAMPLERATE,
              GST_AUDIO_INFO_RATE (info))) != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Unable to set sample rate %d: %d\n",
        GST_AUDIO_INFO_RATE (info), err);
    return FALSE;
  }

  if (GST_AUDIO_INFO_CHANNELS (info) == 1) {
    channel_mode = MODE_1;
    self->need_reorder = FALSE;
    self->aac_positions = NULL;
  } else {
    guint64 in_channel_mask, out_channel_mask;
    gint i;

    for (i = 0; i < G_N_ELEMENTS (channel_layouts); i++) {
      if (channel_layouts[i].channels != GST_AUDIO_INFO_CHANNELS (info))
        continue;

      gst_audio_channel_positions_to_mask (&GST_AUDIO_INFO_POSITION (info, 0),
          GST_AUDIO_INFO_CHANNELS (info), FALSE, &in_channel_mask);
      gst_audio_channel_positions_to_mask (channel_layouts[i].positions,
          channel_layouts[i].channels, FALSE, &out_channel_mask);
      if (in_channel_mask == out_channel_mask) {
        channel_mode = channel_layouts[i].mode;
        self->need_reorder =
            memcmp (channel_layouts[i].positions,
            &GST_AUDIO_INFO_POSITION (info, 0),
            GST_AUDIO_INFO_CHANNELS (info) *
            sizeof (GstAudioChannelPosition)) != 0;
        self->aac_positions = channel_layouts[i].positions;
        break;
      }
    }

    if (i == G_N_ELEMENTS (channel_layouts)) {
      GST_ERROR_OBJECT (self, "Couldn't find a valid channel layout");
      return FALSE;
    }
  }

  if ((err = aacEncoder_SetParam (self->enc, AACENC_CHANNELMODE,
              channel_mode)) != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Unable to set channel mode %d: %d", channel_mode,
        err);
    return FALSE;
  }

  /* MPEG channel order */
  if ((err = aacEncoder_SetParam (self->enc, AACENC_CHANNELORDER,
              0)) != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Unable to set channel order %d: %d", channel_mode,
        err);
    return FALSE;
  }

  bitrate = self->bitrate;
  /* See
   * http://wiki.hydrogenaud.io/index.php?title=Fraunhofer_FDK_AAC#Recommended_Sampling_Rate_and_Bitrate_Combinations
   */
  if (bitrate == 0) {
    if (GST_AUDIO_INFO_CHANNELS (info) == 1) {
      if (GST_AUDIO_INFO_RATE (info) < 16000) {
        bitrate = 8000;
      } else if (GST_AUDIO_INFO_RATE (info) == 16000) {
        bitrate = 16000;
      } else if (GST_AUDIO_INFO_RATE (info) < 32000) {
        bitrate = 24000;
      } else if (GST_AUDIO_INFO_RATE (info) == 32000) {
        bitrate = 32000;
      } else if (GST_AUDIO_INFO_RATE (info) <= 44100) {
        bitrate = 56000;
      } else {
        bitrate = 160000;
      }
    } else if (GST_AUDIO_INFO_CHANNELS (info) == 2) {
      if (GST_AUDIO_INFO_RATE (info) < 16000) {
        bitrate = 16000;
      } else if (GST_AUDIO_INFO_RATE (info) == 16000) {
        bitrate = 24000;
      } else if (GST_AUDIO_INFO_RATE (info) < 22050) {
        bitrate = 32000;
      } else if (GST_AUDIO_INFO_RATE (info) < 32000) {
        bitrate = 40000;
      } else if (GST_AUDIO_INFO_RATE (info) == 32000) {
        bitrate = 96000;
      } else if (GST_AUDIO_INFO_RATE (info) <= 44100) {
        bitrate = 112000;
      } else {
        bitrate = 320000;
      }
    } else {
      /* 5, 5.1 */
      if (GST_AUDIO_INFO_RATE (info) < 32000) {
        bitrate = 160000;
      } else if (GST_AUDIO_INFO_RATE (info) <= 44100) {
        bitrate = 240000;
      } else {
        bitrate = 320000;
      }
    }
  }

  if ((err = aacEncoder_SetParam (self->enc, AACENC_TRANSMUX,
              transmux)) != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Unable to set transmux %d: %d", transmux, err);
    return FALSE;
  }

  if ((err = aacEncoder_SetParam (self->enc, AACENC_BITRATE,
              bitrate)) != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Unable to set bitrate %d: %d", bitrate, err);
    return FALSE;
  }

  if ((err = aacEncEncode (self->enc, NULL, NULL, NULL, NULL)) != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Unable to initialize encoder: %d", err);
    return FALSE;
  }

  if ((err = aacEncInfo (self->enc, &enc_info)) != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Unable to get encoder info: %d", err);
    return FALSE;
  }

  gst_audio_encoder_set_frame_max (enc, 1);
  gst_audio_encoder_set_frame_samples_min (enc, enc_info.frameLength);
  gst_audio_encoder_set_frame_samples_max (enc, enc_info.frameLength);
  gst_audio_encoder_set_hard_min (enc, FALSE);
  self->outbuf_size = enc_info.maxOutBufBytes;
  self->samples_per_frame = enc_info.frameLength;

  src_caps = gst_caps_new_simple ("audio/mpeg",
      "mpegversion", G_TYPE_INT, mpegversion,
      "channels", G_TYPE_INT, GST_AUDIO_INFO_CHANNELS (info),
      "framed", G_TYPE_BOOLEAN, TRUE,
      "rate", G_TYPE_INT, GST_AUDIO_INFO_RATE (info), NULL);

  /* raw */
  if (transmux == 0) {
    GstBuffer *codec_data =
        gst_buffer_new_wrapped (g_memdup (enc_info.confBuf, enc_info.confSize),
        enc_info.confSize);
    gst_caps_set_simple (src_caps, "codec_data", GST_TYPE_BUFFER, codec_data,
        "stream-format", G_TYPE_STRING, "raw", NULL);
    gst_buffer_unref (codec_data);
  } else if (transmux == 1) {
    gst_caps_set_simple (src_caps, "stream-format", G_TYPE_STRING, "adif",
        NULL);
  } else if (transmux == 2) {
    gst_caps_set_simple (src_caps, "stream-format", G_TYPE_STRING, "adts",
        NULL);
  } else {
    g_assert_not_reached ();
  }

  gst_codec_utils_aac_caps_set_level_and_profile (src_caps, enc_info.confBuf,
      enc_info.confSize);

  ret = gst_audio_encoder_set_output_format (enc, src_caps);
  gst_caps_unref (src_caps);

  return ret;
}
Example #15
static GstFlowReturn
gst_siren_enc_handle_frame (GstAudioEncoder * benc, GstBuffer * buf)
{
  GstSirenEnc *enc;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *out_buf;
  guint8 *in_data, *out_data;
  guint i, size, num_frames;
  gint out_size, in_size;
  gint encode_ret;

  g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);

  enc = GST_SIREN_ENC (benc);

  size = GST_BUFFER_SIZE (buf);

  GST_LOG_OBJECT (enc, "Received buffer of size %d", GST_BUFFER_SIZE (buf));

  g_return_val_if_fail (size > 0, GST_FLOW_ERROR);
  g_return_val_if_fail (size % 640 == 0, GST_FLOW_ERROR);

  /* we need to process 640 input bytes to produce 40 output bytes */
  /* calculate the amount of frames we will handle */
  num_frames = size / 640;

  /* this is the input/output size */
  in_size = num_frames * 640;
  out_size = num_frames * 40;

  GST_LOG_OBJECT (enc, "we have %u frames, %u in, %u out", num_frames, in_size,
      out_size);

  /* get a buffer */
  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (benc),
      -1, out_size, GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (benc)), &out_buf);
  if (ret != GST_FLOW_OK)
    goto alloc_failed;

  /* get the input data for all the frames */
  in_data = GST_BUFFER_DATA (buf);
  out_data = GST_BUFFER_DATA (out_buf);

  for (i = 0; i < num_frames; i++) {
    GST_LOG_OBJECT (enc, "Encoding frame %u/%u", i, num_frames);

    /* encode 640 input bytes to 40 output bytes */
    encode_ret = Siren7_EncodeFrame (enc->encoder, in_data, out_data);
    if (encode_ret != 0)
      goto encode_error;

    /* move to next frame */
    out_data += 40;
    in_data += 640;
  }

  GST_LOG_OBJECT (enc, "Finished encoding");

  /* we encode all we get, pass it along */
  ret = gst_audio_encoder_finish_frame (benc, out_buf, -1);

done:
  return ret;

  /* ERRORS */
alloc_failed:
  {
    GST_DEBUG_OBJECT (enc, "failed to pad_alloc buffer: %d (%s)", ret,
        gst_flow_get_name (ret));
    goto done;
  }
encode_error:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("Error encoding frame: %d", encode_ret));
    ret = GST_FLOW_ERROR;
    gst_buffer_unref (out_buf);
    goto done;
  }
}
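This example allocates its output with the 0.10-era gst_pad_alloc_buffer_and_set_caps (). Against the 1.x GstAudioEncoder API used by the vorbis and fdkaac examples in this collection, the allocation step could be sketched roughly like this (hypothetical helper, for comparison only):

/* Sketch only: allocate the output buffer through the base class and map it
 * for writing; the caller encodes into map->data, unmaps, and then calls
 * gst_audio_encoder_finish_frame () as above. */
static GstFlowReturn
gst_siren_enc_alloc_output (GstAudioEncoder * benc, gsize out_size,
    GstBuffer ** out_buf, GstMapInfo * map)
{
  *out_buf = gst_audio_encoder_allocate_output_buffer (benc, out_size);
  if (*out_buf == NULL)
    return GST_FLOW_ERROR;

  if (!gst_buffer_map (*out_buf, map, GST_MAP_WRITE)) {
    gst_buffer_unref (*out_buf);
    *out_buf = NULL;
    return GST_FLOW_ERROR;
  }

  return GST_FLOW_OK;
}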
Example #16
static GstFlowReturn
gst_celt_enc_handle_frame (GstAudioEncoder * benc, GstBuffer * buf)
{
  GstCeltEnc *enc;
  GstFlowReturn ret = GST_FLOW_OK;

  enc = GST_CELT_ENC (benc);

  if (!enc->header_sent) {
    /* Celt streams begin with two headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.
       We merely need to make the headers, then pass them to libcelt 
       one at a time; libcelt handles the additional Ogg bitstream 
       constraints */
    GstBuffer *buf1, *buf2;
    GstCaps *caps;
    /* libcelt has a bug which underestimates header size by 4... */
    unsigned int header_size = enc->header.header_size + 4;
    unsigned char *data = g_malloc (header_size);

    /* create header buffer */
    int error = celt_header_to_packet (&enc->header, data, header_size);
    if (error < 0) {
      g_free (data);
      goto no_header;
    }
    buf1 = gst_buffer_new ();
    GST_BUFFER_DATA (buf1) = GST_BUFFER_MALLOCDATA (buf1) = data;
    GST_BUFFER_SIZE (buf1) = header_size;
    GST_BUFFER_OFFSET_END (buf1) = 0;
    GST_BUFFER_OFFSET (buf1) = 0;

    /* create comment buffer */
    buf2 = gst_celt_enc_create_metadata_buffer (enc);

    /* mark and put on caps */
    caps = gst_pad_get_caps (GST_AUDIO_ENCODER_SRC_PAD (enc));
    gst_caps_set_simple (caps,
        "rate", G_TYPE_INT, enc->rate,
        "channels", G_TYPE_INT, enc->channels,
        "frame-size", G_TYPE_INT, enc->frame_size, NULL);
    caps = _gst_caps_set_buffer_array (caps, "streamheader", buf1, buf2, NULL);

    /* negotiate with these caps */
    GST_DEBUG_OBJECT (enc, "here are the caps: %" GST_PTR_FORMAT, caps);
    GST_LOG_OBJECT (enc, "rate=%d channels=%d frame-size=%d",
        enc->rate, enc->channels, enc->frame_size);
    gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc), caps);

    gst_buffer_set_caps (buf1, caps);
    gst_buffer_set_caps (buf2, caps);
    gst_caps_unref (caps);

    /* push out buffers */
    /* store buffers for later pre_push sending */
    g_slist_foreach (enc->headers, (GFunc) gst_buffer_unref, NULL);
    enc->headers = NULL;
    GST_DEBUG_OBJECT (enc, "storing header buffers");
    enc->headers = g_slist_prepend (enc->headers, buf2);
    enc->headers = g_slist_prepend (enc->headers, buf1);

    enc->header_sent = TRUE;
  }

  GST_DEBUG_OBJECT (enc, "received buffer %p of %u bytes", buf,
      buf ? GST_BUFFER_SIZE (buf) : 0);

  ret = gst_celt_enc_encode (enc, buf);

done:
  return ret;

  /* ERRORS */
no_header:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("Failed to encode header"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
}
Example #17
static GstCaps *
gst_opus_enc_sink_getcaps (GstAudioEncoder * benc)
{
  GstOpusEnc *enc;
  GstCaps *caps;
  GstCaps *peercaps = NULL;
  GstCaps *intersect = NULL;
  guint i;
  gboolean allow_multistream;

  enc = GST_OPUS_ENC (benc);

  GST_DEBUG_OBJECT (enc, "sink getcaps");

  peercaps = gst_pad_peer_get_caps (GST_AUDIO_ENCODER_SRC_PAD (benc));
  if (!peercaps) {
    GST_DEBUG_OBJECT (benc, "No peercaps, returning template sink caps");
    return
        gst_caps_copy (gst_pad_get_pad_template_caps
        (GST_AUDIO_ENCODER_SINK_PAD (benc)));
  }

  intersect = gst_caps_intersect (peercaps,
      gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SRC_PAD (benc)));
  gst_caps_unref (peercaps);

  if (gst_caps_is_empty (intersect))
    return intersect;

  allow_multistream = FALSE;
  for (i = 0; i < gst_caps_get_size (intersect); i++) {
    GstStructure *s = gst_caps_get_structure (intersect, i);
    gboolean multistream;
    if (gst_structure_get_boolean (s, "multistream", &multistream)) {
      if (multistream) {
        allow_multistream = TRUE;
      }
    } else {
      allow_multistream = TRUE;
    }
  }

  gst_caps_unref (intersect);

  caps =
      gst_caps_copy (gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SINK_PAD
          (benc)));
  if (!allow_multistream) {
    GValue range = { 0 };
    g_value_init (&range, GST_TYPE_INT_RANGE);
    gst_value_set_int_range (&range, 1, 2);
    for (i = 0; i < gst_caps_get_size (caps); i++) {
      GstStructure *s = gst_caps_get_structure (caps, i);
      gst_structure_set_value (s, "channels", &range);
    }
    g_value_unset (&range);
  }

  GST_DEBUG_OBJECT (enc, "Returning caps: %" GST_PTR_FORMAT, caps);
  return caps;
}
Example #18
static gboolean
gst_omx_audio_enc_set_format (GstAudioEncoder * encoder, GstAudioInfo * info)
{
  GstOMXAudioEnc *self;
  GstOMXAudioEncClass *klass;
  gboolean needs_disable = FALSE;
  OMX_PARAM_PORTDEFINITIONTYPE port_def;
  OMX_AUDIO_PARAM_PCMMODETYPE pcm_param;
  gint i;
  OMX_ERRORTYPE err;

  self = GST_OMX_AUDIO_ENC (encoder);
  klass = GST_OMX_AUDIO_ENC_GET_CLASS (encoder);

  GST_DEBUG_OBJECT (self, "Setting new caps");

  /* Set audio encoder base class properties */
  gst_audio_encoder_set_frame_samples_min (encoder,
      gst_util_uint64_scale_ceil (OMX_MIN_PCMPAYLOAD_MSEC,
          GST_MSECOND * info->rate, GST_SECOND));
  gst_audio_encoder_set_frame_samples_max (encoder, 0);

  gst_omx_port_get_port_definition (self->enc_in_port, &port_def);

  needs_disable =
      gst_omx_component_get_state (self->enc,
      GST_CLOCK_TIME_NONE) != OMX_StateLoaded;
  /* If the component is not in Loaded state and a real format change happens
   * we have to disable the port and re-allocate all buffers. If no real
   * format change happened we can just exit here.
   */
  if (needs_disable) {
    GST_DEBUG_OBJECT (self, "Need to disable and drain encoder");
    gst_omx_audio_enc_drain (self);
    gst_omx_port_set_flushing (self->enc_out_port, 5 * GST_SECOND, TRUE);

    /* Wait until the srcpad loop is finished,
     * unlock GST_AUDIO_ENCODER_STREAM_LOCK to prevent deadlocks
     * caused by using this lock from inside the loop function */
    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
    gst_pad_stop_task (GST_AUDIO_ENCODER_SRC_PAD (encoder));
    GST_AUDIO_ENCODER_STREAM_LOCK (self);

    if (gst_omx_port_set_enabled (self->enc_in_port, FALSE) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_set_enabled (self->enc_out_port, FALSE) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_wait_buffers_released (self->enc_in_port,
            5 * GST_SECOND) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_wait_buffers_released (self->enc_out_port,
            1 * GST_SECOND) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_deallocate_buffers (self->enc_in_port) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_deallocate_buffers (self->enc_out_port) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_wait_enabled (self->enc_in_port,
            1 * GST_SECOND) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_wait_enabled (self->enc_out_port,
            1 * GST_SECOND) != OMX_ErrorNone)
      return FALSE;

    GST_DEBUG_OBJECT (self, "Encoder drained and disabled");
  }

  port_def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
  GST_DEBUG_OBJECT (self, "Setting inport port definition");
  if (gst_omx_port_update_port_definition (self->enc_in_port,
          &port_def) != OMX_ErrorNone)
    return FALSE;

  GST_OMX_INIT_STRUCT (&pcm_param);
  pcm_param.nPortIndex = self->enc_in_port->index;
  pcm_param.nChannels = info->channels;
  pcm_param.eNumData =
      ((info->finfo->flags & GST_AUDIO_FORMAT_FLAG_SIGNED) ?
      OMX_NumericalDataSigned : OMX_NumericalDataUnsigned);
  pcm_param.eEndian =
      ((info->finfo->endianness == G_LITTLE_ENDIAN) ?
      OMX_EndianLittle : OMX_EndianBig);
  pcm_param.bInterleaved = OMX_TRUE;
  pcm_param.nBitPerSample = info->finfo->width;
  pcm_param.nSamplingRate = info->rate;
  pcm_param.ePCMMode = OMX_AUDIO_PCMModeLinear;

  for (i = 0; i < pcm_param.nChannels; i++) {
    OMX_AUDIO_CHANNELTYPE pos;

    switch (info->position[i]) {
      case GST_AUDIO_CHANNEL_POSITION_MONO:
      case GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER:
        pos = OMX_AUDIO_ChannelCF;
        break;
      case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
        pos = OMX_AUDIO_ChannelLF;
        break;
      case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        pos = OMX_AUDIO_ChannelRF;
        break;
      case GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT:
        pos = OMX_AUDIO_ChannelLS;
        break;
      case GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT:
        pos = OMX_AUDIO_ChannelRS;
        break;
      case GST_AUDIO_CHANNEL_POSITION_LFE1:
        pos = OMX_AUDIO_ChannelLFE;
        break;
      case GST_AUDIO_CHANNEL_POSITION_REAR_CENTER:
        pos = OMX_AUDIO_ChannelCS;
        break;
      case GST_AUDIO_CHANNEL_POSITION_REAR_LEFT:
        pos = OMX_AUDIO_ChannelLR;
        break;
      case GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT:
        pos = OMX_AUDIO_ChannelRR;
        break;
      default:
        pos = OMX_AUDIO_ChannelNone;
        break;
    }
    pcm_param.eChannelMapping[i] = pos;
  }

  GST_DEBUG_OBJECT (self, "Setting PCM parameters");
  err =
      gst_omx_component_set_parameter (self->enc, OMX_IndexParamAudioPcm,
      &pcm_param);
  if (err != OMX_ErrorNone) {
    GST_ERROR_OBJECT (self, "Failed to set PCM parameters: %s (0x%08x)",
        gst_omx_error_to_string (err), err);
    return FALSE;
  }

  if (klass->set_format) {
    if (!klass->set_format (self, self->enc_in_port, info)) {
      GST_ERROR_OBJECT (self, "Subclass failed to set the new format");
      return FALSE;
    }
  }

  GST_DEBUG_OBJECT (self, "Updating outport port definition");
  if (gst_omx_port_update_port_definition (self->enc_out_port,
          NULL) != OMX_ErrorNone)
    return FALSE;

  GST_DEBUG_OBJECT (self, "Enabling component");
  if (needs_disable) {
    if (gst_omx_port_set_enabled (self->enc_in_port, TRUE) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_allocate_buffers (self->enc_in_port) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_wait_enabled (self->enc_in_port,
            5 * GST_SECOND) != OMX_ErrorNone)
      return FALSE;
    if (gst_omx_port_mark_reconfigured (self->enc_in_port) != OMX_ErrorNone)
      return FALSE;
  } else {
    /* Disable output port */
    if (gst_omx_port_set_enabled (self->enc_out_port, FALSE) != OMX_ErrorNone)
      return FALSE;

    if (gst_omx_port_wait_enabled (self->enc_out_port,
            1 * GST_SECOND) != OMX_ErrorNone)
      return FALSE;

    if (gst_omx_component_set_state (self->enc, OMX_StateIdle) != OMX_ErrorNone)
      return FALSE;

    /* Need to allocate buffers to reach Idle state */
    if (gst_omx_port_allocate_buffers (self->enc_in_port) != OMX_ErrorNone)
      return FALSE;

    if (gst_omx_component_get_state (self->enc,
            GST_CLOCK_TIME_NONE) != OMX_StateIdle)
      return FALSE;

    if (gst_omx_component_set_state (self->enc,
            OMX_StateExecuting) != OMX_ErrorNone)
      return FALSE;

    if (gst_omx_component_get_state (self->enc,
            GST_CLOCK_TIME_NONE) != OMX_StateExecuting)
      return FALSE;
  }

  /* Unset flushing to allow ports to accept data again */
  gst_omx_port_set_flushing (self->enc_in_port, 5 * GST_SECOND, FALSE);
  gst_omx_port_set_flushing (self->enc_out_port, 5 * GST_SECOND, FALSE);

  if (gst_omx_component_get_last_error (self->enc) != OMX_ErrorNone) {
    GST_ERROR_OBJECT (self, "Component in error state: %s (0x%08x)",
        gst_omx_component_get_last_error_string (self->enc),
        gst_omx_component_get_last_error (self->enc));
    return FALSE;
  }

  /* Start the srcpad loop again */
  GST_DEBUG_OBJECT (self, "Starting task again");
  self->downstream_flow_ret = GST_FLOW_OK;
  gst_pad_start_task (GST_AUDIO_ENCODER_SRC_PAD (self),
      (GstTaskFunction) gst_omx_audio_enc_loop, encoder, NULL);

  return TRUE;
}
Example #19
static void
gst_omx_audio_enc_loop (GstOMXAudioEnc * self)
{
  GstOMXAudioEncClass *klass;
  GstOMXPort *port = self->enc_out_port;
  GstOMXBuffer *buf = NULL;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstOMXAcquireBufferReturn acq_return;
  OMX_ERRORTYPE err;

  klass = GST_OMX_AUDIO_ENC_GET_CLASS (self);

  acq_return = gst_omx_port_acquire_buffer (port, &buf);
  if (acq_return == GST_OMX_ACQUIRE_BUFFER_ERROR) {
    goto component_error;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_FLUSHING) {
    goto flushing;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_EOS) {
    goto eos;
  }

  if (!gst_pad_has_current_caps (GST_AUDIO_ENCODER_SRC_PAD (self))
      || acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
    GstAudioInfo *info =
        gst_audio_encoder_get_audio_info (GST_AUDIO_ENCODER (self));
    GstCaps *caps;

    GST_DEBUG_OBJECT (self, "Port settings have changed, updating caps");

    /* Reallocate all buffers */
    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
      err = gst_omx_port_set_enabled (port, FALSE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_buffers_released (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_deallocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 1 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

    }

    GST_AUDIO_ENCODER_STREAM_LOCK (self);

    caps = klass->get_caps (self, self->enc_out_port, info);
    if (!caps) {
      if (buf)
        gst_omx_port_release_buffer (self->enc_out_port, buf);
      GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
      goto caps_failed;
    }

    GST_DEBUG_OBJECT (self, "Setting output caps: %" GST_PTR_FORMAT, caps);

    if (!gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (self), caps)) {
      gst_caps_unref (caps);
      if (buf)
        gst_omx_port_release_buffer (self->enc_out_port, buf);
      GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
      goto caps_failed;
    }
    gst_caps_unref (caps);

    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);

    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
      err = gst_omx_port_set_enabled (port, TRUE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_allocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_populate (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_mark_reconfigured (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    /* Now get a buffer */
    if (acq_return != GST_OMX_ACQUIRE_BUFFER_OK) {
      return;
    }
  }

  g_assert (acq_return == GST_OMX_ACQUIRE_BUFFER_OK);
  if (!buf) {
    g_assert ((klass->cdata.hacks & GST_OMX_HACK_NO_EMPTY_EOS_BUFFER));
    GST_AUDIO_ENCODER_STREAM_LOCK (self);
    goto eos;
  }

  GST_DEBUG_OBJECT (self, "Handling buffer: 0x%08x %" G_GUINT64_FORMAT,
      (guint) buf->omx_buf->nFlags, (guint64) buf->omx_buf->nTimeStamp);

  /* This prevents a deadlock between the srcpad stream
   * lock and the codec stream lock, if ::reset()
   * is called at the wrong time
   */
  if (gst_omx_port_is_flushing (self->enc_out_port)) {
    GST_DEBUG_OBJECT (self, "Flushing");
    gst_omx_port_release_buffer (self->enc_out_port, buf);
    goto flushing;
  }

  GST_AUDIO_ENCODER_STREAM_LOCK (self);

  if ((buf->omx_buf->nFlags & OMX_BUFFERFLAG_CODECCONFIG)
      && buf->omx_buf->nFilledLen > 0) {
    GstCaps *caps;
    GstBuffer *codec_data;
    GstMapInfo map = GST_MAP_INFO_INIT;

    GST_DEBUG_OBJECT (self, "Handling codec data");
    caps =
        gst_caps_copy (gst_pad_get_current_caps (GST_AUDIO_ENCODER_SRC_PAD
            (self)));
    codec_data = gst_buffer_new_and_alloc (buf->omx_buf->nFilledLen);

    gst_buffer_map (codec_data, &map, GST_MAP_WRITE);
    memcpy (map.data,
        buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
        buf->omx_buf->nFilledLen);
    gst_buffer_unmap (codec_data, &map);

    gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, codec_data, NULL);
    if (!gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (self), caps)) {
      gst_caps_unref (caps);
      if (buf)
        gst_omx_port_release_buffer (self->enc_out_port, buf);
      GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
      goto caps_failed;
    }
    gst_caps_unref (caps);
    flow_ret = GST_FLOW_OK;
  } else if (buf->omx_buf->nFilledLen > 0) {
    GstBuffer *outbuf;
    guint n_samples;

    GST_DEBUG_OBJECT (self, "Handling output data");

    n_samples =
        klass->get_num_samples (self, self->enc_out_port,
        gst_audio_encoder_get_audio_info (GST_AUDIO_ENCODER (self)), buf);

    if (buf->omx_buf->nFilledLen > 0) {
      GstMapInfo map = GST_MAP_INFO_INIT;
      outbuf = gst_buffer_new_and_alloc (buf->omx_buf->nFilledLen);

      gst_buffer_map (outbuf, &map, GST_MAP_WRITE);

      memcpy (map.data,
          buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
          buf->omx_buf->nFilledLen);
      gst_buffer_unmap (outbuf, &map);

    } else {
      outbuf = gst_buffer_new ();
    }

    GST_BUFFER_TIMESTAMP (outbuf) =
        gst_util_uint64_scale (buf->omx_buf->nTimeStamp, GST_SECOND,
        OMX_TICKS_PER_SECOND);
    if (buf->omx_buf->nTickCount != 0)
      GST_BUFFER_DURATION (outbuf) =
          gst_util_uint64_scale (buf->omx_buf->nTickCount, GST_SECOND,
          OMX_TICKS_PER_SECOND);

    flow_ret =
        gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (self),
        outbuf, n_samples);
  }

  GST_DEBUG_OBJECT (self, "Handled output data");

  GST_DEBUG_OBJECT (self, "Finished frame: %s", gst_flow_get_name (flow_ret));

  err = gst_omx_port_release_buffer (port, buf);
  if (err != OMX_ErrorNone)
    goto release_error;

  self->downstream_flow_ret = flow_ret;

  if (flow_ret != GST_FLOW_OK)
    goto flow_error;

  GST_AUDIO_ENCODER_STREAM_UNLOCK (self);

  return;

component_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("OpenMAX component in error state %s (0x%08x)",
            gst_omx_component_get_last_error_string (self->enc),
            gst_omx_component_get_last_error (self->enc)));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    return;
  }
flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_FLUSHING;
    self->started = FALSE;
    return;
  }
eos:
  {
    g_mutex_lock (&self->drain_lock);
    if (self->draining) {
      GST_DEBUG_OBJECT (self, "Drained");
      self->draining = FALSE;
      g_cond_broadcast (&self->drain_cond);
      flow_ret = GST_FLOW_OK;
      gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    } else {
      GST_DEBUG_OBJECT (self, "Component signalled EOS");
      flow_ret = GST_FLOW_EOS;
    }
    g_mutex_unlock (&self->drain_lock);

    GST_AUDIO_ENCODER_STREAM_LOCK (self);
    self->downstream_flow_ret = flow_ret;

    /* Here we fall back and pause the task for the EOS case */
    if (flow_ret != GST_FLOW_OK)
      goto flow_error;

    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);

    return;
  }
flow_error:
  {
    if (flow_ret == GST_FLOW_EOS) {
      GST_DEBUG_OBJECT (self, "EOS");

      gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    } else if (flow_ret == GST_FLOW_NOT_LINKED || flow_ret < GST_FLOW_EOS) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, ("Internal data stream error."),
          ("stream stopped, reason %s", gst_flow_get_name (flow_ret)));

      gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    }
    self->started = FALSE;
    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
    return;
  }
reconfigure_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Unable to reconfigure output port"));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    return;
  }
caps_failed:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL), ("Failed to set caps"));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    return;
  }
release_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to relase output buffer to component: %s (0x%08x)",
            gst_omx_error_to_string (err), err));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
    return;
  }
}
Example #20
static GstFlowReturn
gst_speex_enc_encode (GstSpeexEnc * enc, GstBuffer * buf)
{
  gint frame_size = enc->frame_size;
  gint bytes = frame_size * 2 * enc->channels, samples, size;
  gint outsize, written, dtx_ret = 0;
  guint8 *data;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;

  if (G_LIKELY (buf)) {
    data = GST_BUFFER_DATA (buf);
    size = GST_BUFFER_SIZE (buf);

    if (G_UNLIKELY (size % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");
      size = ((size / bytes) + 1) * bytes;
      data = g_malloc0 (size);
      memcpy (data, GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf));
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  samples = size / (2 * enc->channels);
  speex_bits_reset (&enc->bits);

  /* FIXME what about dropped samples if DTX enabled ?? */

  while (size) {
    GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)", frame_size, bytes);

    if (enc->channels == 2) {
      speex_encode_stereo_int ((gint16 *) data, frame_size, &enc->bits);
    }
    dtx_ret += speex_encode_int (enc->state, (gint16 *) data, &enc->bits);

    data += bytes;
    size -= bytes;
  }

  speex_bits_insert_terminator (&enc->bits);
  outsize = speex_bits_nbytes (&enc->bits);

  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc),
      GST_BUFFER_OFFSET_NONE, outsize,
      GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (enc)), &outbuf);

  if ((GST_FLOW_OK != ret))
    goto done;

  written = speex_bits_write (&enc->bits,
      (gchar *) GST_BUFFER_DATA (outbuf), outsize);

  if (G_UNLIKELY (written < outsize)) {
    GST_ERROR_OBJECT (enc, "short write: %d < %d bytes", written, outsize);
    GST_BUFFER_SIZE (outbuf) = written;
  } else if (G_UNLIKELY (written > outsize)) {
    GST_ERROR_OBJECT (enc, "overrun: %d > %d bytes", written, outsize);
  }

  if (!dtx_ret)
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);

  ret = gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc),
      outbuf, samples);

done:
  g_free (data0);
  return ret;
}
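
This drain path, like the CELT and Opus encoders below, rounds a trailing partial frame up to a whole frame by copying the samples into a zero-filled buffer. A minimal sketch of that padding pattern in isolation, assuming a hypothetical helper name that is not part of any of these plugins:

/* Hypothetical helper illustrating the pad-with-silence pattern above.
 * Returns a zero-padded copy when the input is not a whole number of frames
 * (the trailing zero bytes are silence for 16-bit PCM), or NULL when the
 * original data can be used as-is; the caller frees the returned buffer. */
static guint8 *
pad_to_frame_multiple (const guint8 * data, gsize size, gsize frame_bytes,
    gsize * padded_size)
{
  guint8 *padded;

  if (size % frame_bytes == 0) {
    *padded_size = size;
    return NULL;
  }

  *padded_size = ((size / frame_bytes) + 1) * frame_bytes;
  padded = g_malloc0 (*padded_size);
  memcpy (padded, data, size);
  return padded;
}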
Beispiel #21
0
static GstFlowReturn
gst_celt_enc_encode (GstCeltEnc * enc, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint frame_size = enc->frame_size;
  gint bytes = frame_size * 2 * enc->channels;
  gint bytes_per_packet;
  gint16 *data, *data0 = NULL;
  gint outsize, size;
  GstBuffer *outbuf;

  if (G_LIKELY (buf)) {
    data = (gint16 *) GST_BUFFER_DATA (buf);
    size = GST_BUFFER_SIZE (buf);

    if (G_UNLIKELY (size % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");
      size = ((size / bytes) + 1) * bytes;
      data0 = data = g_malloc0 (size);
      memcpy (data, GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf));
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  frame_size = size / (2 * enc->channels);
  if (enc->cbr) {
    bytes_per_packet = (enc->bitrate * frame_size / enc->rate + 4) / 8;
  } else {
    bytes_per_packet = (enc->max_bitrate * frame_size / enc->rate + 4) / 8;
  }

  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc),
      GST_BUFFER_OFFSET_NONE, bytes_per_packet,
      GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (enc)), &outbuf);

  if (GST_FLOW_OK != ret)
    goto done;

  GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)", frame_size, bytes);

#ifdef HAVE_CELT_0_8
  outsize =
      celt_encode (enc->state, data, frame_size,
      GST_BUFFER_DATA (outbuf), bytes_per_packet);
#else
  outsize =
      celt_encode (enc->state, data, NULL,
      GST_BUFFER_DATA (outbuf), bytes_per_packet);
#endif

  if (outsize < 0) {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("encoding failed: %d", outsize));
    ret = GST_FLOW_ERROR;
    goto done;
  }

  GST_DEBUG_OBJECT (enc, "encoding %d bytes", bytes);

  ret = gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc),
      outbuf, frame_size);

done:
  g_free (data0);
  return ret;
}
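
For illustration, assuming a CBR configuration with bitrate = 64000 bps, rate = 44100 Hz and frame_size = 480 samples, the computation above gives bytes_per_packet = (64000 * 480 / 44100 + 4) / 8 = (696 + 4) / 8 = 87 bytes per packet (integer arithmetic throughout); the + 4 presumably rounds the bit count to the nearest whole byte instead of truncating.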
Beispiel #22
0
static GstFlowReturn
gst_opus_enc_encode (GstOpusEnc * enc, GstBuffer * buf)
{
  guint8 *bdata, *data, *mdata = NULL;
  gsize bsize, size;
  gsize bytes;
  GstFlowReturn ret = GST_FLOW_OK;
  gint outsize;
  GstBuffer *outbuf;

  g_mutex_lock (enc->property_lock);

  bytes = enc->frame_samples * enc->n_channels * 2;
  if (G_LIKELY (buf)) {
    bdata = GST_BUFFER_DATA (buf);
    bsize = GST_BUFFER_SIZE (buf);
    if (G_UNLIKELY (bsize % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");

      size = ((bsize / bytes) + 1) * bytes;
      mdata = g_malloc0 (size);
      memcpy (mdata, bdata, bsize);
      bdata = NULL;
      data = mdata;
    } else {
      data = bdata;
      size = bsize;
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  g_assert (size == bytes);

  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc),
      GST_BUFFER_OFFSET_NONE, enc->max_payload_size * enc->n_channels,
      GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (enc)), &outbuf);

  if (GST_FLOW_OK != ret)
    goto done;

  GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)",
      enc->frame_samples, (int) bytes);

  outsize =
      opus_multistream_encode (enc->state, (const gint16 *) data,
      enc->frame_samples, GST_BUFFER_DATA (outbuf),
      enc->max_payload_size * enc->n_channels);

  if (outsize < 0) {
    GST_ERROR_OBJECT (enc, "Encoding failed: %d", outsize);
    ret = GST_FLOW_ERROR;
    goto done;
  } else if (outsize > enc->max_payload_size) {
    GST_WARNING_OBJECT (enc,
        "Encoded size %d is higher than max payload size (%d bytes)",
        outsize, enc->max_payload_size);
    ret = GST_FLOW_ERROR;
    goto done;
  }

  GST_DEBUG_OBJECT (enc, "Output packet is %u bytes", outsize);
  GST_BUFFER_SIZE (outbuf) = outsize;

  ret =
      gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc), outbuf,
      enc->frame_samples);

done:

  g_mutex_unlock (enc->property_lock);

  if (mdata)
    g_free (mdata);

  return ret;
}
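
The example above assumes enc->state is an already-created OpusMSEncoder; a minimal sketch, using the public libopus multistream API, of how such a state might be set up for plain stereo (the helper and its parameter values are illustrative, not taken from the plugin):

#include <opus/opus_multistream.h>

/* Hypothetical setup for a stereo multistream encoder: one stream carrying
 * one coupled (stereo) pair, with an identity channel mapping. */
static OpusMSEncoder *
create_stereo_ms_encoder (int rate)
{
  const unsigned char mapping[2] = { 0, 1 };
  int error = OPUS_OK;
  OpusMSEncoder *state;

  state = opus_multistream_encoder_create (rate, 2 /* channels */,
      1 /* streams */, 1 /* coupled streams */, mapping,
      OPUS_APPLICATION_AUDIO, &error);
  if (error != OPUS_OK)
    return NULL;
  return state;
}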
Beispiel #23
0
static gboolean
gst_sbc_enc_set_format (GstAudioEncoder * audio_enc, GstAudioInfo * info)
{
  const gchar *allocation_method, *channel_mode;
  GstSbcEnc *enc = GST_SBC_ENC (audio_enc);
  GstStructure *s;
  GstCaps *caps, *filter_caps;
  GstCaps *output_caps = NULL;
  guint sampleframes_per_frame;

  enc->rate = GST_AUDIO_INFO_RATE (info);
  enc->channels = GST_AUDIO_INFO_CHANNELS (info);

  /* negotiate output format based on downstream caps restrictions */
  caps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (enc));
  if (caps == GST_CAPS_NONE || gst_caps_is_empty (caps))
    goto failure;

  if (caps == NULL)
    caps = gst_static_pad_template_get_caps (&sbc_enc_src_factory);

  /* fixate output caps */
  filter_caps = gst_caps_new_simple ("audio/x-sbc", "rate", G_TYPE_INT,
      enc->rate, "channels", G_TYPE_INT, enc->channels, NULL);
  output_caps = gst_caps_intersect (caps, filter_caps);
  gst_caps_unref (filter_caps);

  if (output_caps == NULL || gst_caps_is_empty (output_caps)) {
    GST_WARNING_OBJECT (enc, "Couldn't negotiate output caps with input rate "
        "%d and input channels %d and allowed output caps %" GST_PTR_FORMAT,
        enc->rate, enc->channels, caps);
    goto failure;
  }

  gst_caps_unref (caps);
  caps = NULL;

  GST_DEBUG_OBJECT (enc, "fixating caps %" GST_PTR_FORMAT, output_caps);
  output_caps = gst_caps_truncate (output_caps);
  s = gst_caps_get_structure (output_caps, 0);
  if (enc->channels == 1)
    gst_structure_fixate_field_string (s, "channel-mode", "mono");
  else
    gst_structure_fixate_field_string (s, "channel-mode", "joint");

  gst_structure_fixate_field_nearest_int (s, "bitpool", 64);
  gst_structure_fixate_field_nearest_int (s, "blocks", 16);
  gst_structure_fixate_field_nearest_int (s, "subbands", 8);
  gst_structure_fixate_field_string (s, "allocation-method", "loudness");
  s = NULL;

  /* in case there's anything else left to fixate */
  output_caps = gst_caps_fixate (output_caps);
  gst_caps_set_simple (output_caps, "parsed", G_TYPE_BOOLEAN, TRUE, NULL);

  GST_INFO_OBJECT (enc, "output caps %" GST_PTR_FORMAT, output_caps);

  /* let's see what we fixated to */
  s = gst_caps_get_structure (output_caps, 0);
  gst_structure_get_int (s, "blocks", &enc->blocks);
  gst_structure_get_int (s, "subbands", &enc->subbands);
  gst_structure_get_int (s, "bitpool", &enc->bitpool);
  allocation_method = gst_structure_get_string (s, "allocation-method");
  channel_mode = gst_structure_get_string (s, "channel-mode");

  /* We want channel-mode and channels coherent */
  if (enc->channels == 1) {
    if (g_strcmp0 (channel_mode, "mono") != 0) {
      GST_ERROR_OBJECT (enc, "Can't have channel-mode '%s' for 1 channel",
          channel_mode);
      goto failure;
    }
  } else {
    if (g_strcmp0 (channel_mode, "joint") != 0 &&
        g_strcmp0 (channel_mode, "stereo") != 0 &&
        g_strcmp0 (channel_mode, "dual") != 0) {
      GST_ERROR_OBJECT (enc, "Can't have channel-mode '%s' for 2 channels",
          channel_mode);
      goto failure;
    }
  }

  /* we want to be handed all available samples in handle_frame, but always
   * enough to encode a frame */
  sampleframes_per_frame = enc->blocks * enc->subbands;
  gst_audio_encoder_set_frame_samples_min (audio_enc, sampleframes_per_frame);
  gst_audio_encoder_set_frame_samples_max (audio_enc, sampleframes_per_frame);
  gst_audio_encoder_set_frame_max (audio_enc, 0);

  /* FIXME: what to do with left-over samples at the end? can we encode them? */
  gst_audio_encoder_set_hard_min (audio_enc, TRUE);

  /* and configure encoder based on the output caps we negotiated */
  if (enc->rate == 16000)
    enc->sbc.frequency = SBC_FREQ_16000;
  else if (enc->rate == 32000)
    enc->sbc.frequency = SBC_FREQ_32000;
  else if (enc->rate == 44100)
    enc->sbc.frequency = SBC_FREQ_44100;
  else if (enc->rate == 48000)
    enc->sbc.frequency = SBC_FREQ_48000;
  else
    goto failure;

  if (enc->blocks == 4)
    enc->sbc.blocks = SBC_BLK_4;
  else if (enc->blocks == 8)
    enc->sbc.blocks = SBC_BLK_8;
  else if (enc->blocks == 12)
    enc->sbc.blocks = SBC_BLK_12;
  else if (enc->blocks == 16)
    enc->sbc.blocks = SBC_BLK_16;
  else
    goto failure;

  enc->sbc.subbands = (enc->subbands == 4) ? SBC_SB_4 : SBC_SB_8;
  enc->sbc.bitpool = enc->bitpool;

  if (channel_mode == NULL || allocation_method == NULL)
    goto failure;

  if (strcmp (channel_mode, "joint") == 0)
    enc->sbc.mode = SBC_MODE_JOINT_STEREO;
  else if (strcmp (channel_mode, "stereo") == 0)
    enc->sbc.mode = SBC_MODE_STEREO;
  else if (strcmp (channel_mode, "dual") == 0)
    enc->sbc.mode = SBC_MODE_DUAL_CHANNEL;
  else if (strcmp (channel_mode, "mono") == 0)
    enc->sbc.mode = SBC_MODE_MONO;
  else if (strcmp (channel_mode, "auto") == 0)
    enc->sbc.mode = SBC_MODE_JOINT_STEREO;
  else
    goto failure;

  if (strcmp (allocation_method, "loudness") == 0)
    enc->sbc.allocation = SBC_AM_LOUDNESS;
  else if (strcmp (allocation_method, "snr") == 0)
    enc->sbc.allocation = SBC_AM_SNR;
  else
    goto failure;

  if (!gst_audio_encoder_set_output_format (audio_enc, output_caps))
    goto failure;

  return gst_audio_encoder_negotiate (audio_enc);

failure:
  if (output_caps)
    gst_caps_unref (output_caps);
  if (caps)
    gst_caps_unref (caps);
  return FALSE;
}
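
A hedged usage sketch showing how this set_format()/negotiation path is typically exercised: a small GStreamer 1.x test program that pushes a tone through the sbcenc element (the pipeline string and element availability are assumptions, not part of the plugin code):

#include <gst/gst.h>

int
main (int argc, char **argv)
{
  GstElement *pipeline;
  GstBus *bus;
  GstMessage *msg;
  GError *error = NULL;

  gst_init (&argc, &argv);

  /* feed a test tone through sbcenc so caps negotiation runs */
  pipeline = gst_parse_launch ("audiotestsrc num-buffers=100 ! audioconvert ! "
      "audio/x-raw,rate=44100,channels=2 ! sbcenc ! fakesink", &error);
  if (pipeline == NULL) {
    g_printerr ("Failed to build pipeline: %s\n", error->message);
    g_clear_error (&error);
    return 1;
  }

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* wait for EOS or an error, then shut down */
  bus = gst_element_get_bus (pipeline);
  msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
      GST_MESSAGE_EOS | GST_MESSAGE_ERROR);
  if (msg != NULL)
    gst_message_unref (msg);
  gst_object_unref (bus);

  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}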
Beispiel #24
0
static gboolean
gst_ffmpegaudenc_set_format (GstAudioEncoder * encoder, GstAudioInfo * info)
{
  GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) encoder;
  GstCaps *other_caps;
  GstCaps *allowed_caps;
  GstCaps *icaps;
  gsize frame_size;
  GstFFMpegAudEncClass *oclass =
      (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);

  /* close old session */
  if (ffmpegaudenc->opened) {
    gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
    ffmpegaudenc->opened = FALSE;
  }

  /* if we set it in _getcaps we should set it also in _link */
  ffmpegaudenc->context->strict_std_compliance = -1;

  /* user defined properties */
  if (ffmpegaudenc->bitrate > 0) {
    GST_INFO_OBJECT (ffmpegaudenc, "Setting avcontext to bitrate %d",
        ffmpegaudenc->bitrate);
    ffmpegaudenc->context->bit_rate = ffmpegaudenc->bitrate;
    ffmpegaudenc->context->bit_rate_tolerance = ffmpegaudenc->bitrate;
  } else {
    GST_INFO_OBJECT (ffmpegaudenc, "Using avcontext default bitrate %d",
        ffmpegaudenc->context->bit_rate);
  }

  /* RTP payload used for GOB production (for Asterisk) */
  if (ffmpegaudenc->rtp_payload_size) {
    ffmpegaudenc->context->rtp_payload_size = ffmpegaudenc->rtp_payload_size;
  }

  /* some other defaults */
  ffmpegaudenc->context->rc_strategy = 2;
  ffmpegaudenc->context->b_frame_strategy = 0;
  ffmpegaudenc->context->coder_type = 0;
  ffmpegaudenc->context->context_model = 0;
  ffmpegaudenc->context->scenechange_threshold = 0;
  ffmpegaudenc->context->inter_threshold = 0;

  /* fetch pix_fmt and so on */
  gst_ffmpeg_audioinfo_to_context (info, ffmpegaudenc->context);
  if (!ffmpegaudenc->context->time_base.den) {
    ffmpegaudenc->context->time_base.den = GST_AUDIO_INFO_RATE (info);
    ffmpegaudenc->context->time_base.num = 1;
    ffmpegaudenc->context->ticks_per_frame = 1;
  }

  if (ffmpegaudenc->context->channel_layout) {
    gst_ffmpeg_channel_layout_to_gst (ffmpegaudenc->context->channel_layout,
        ffmpegaudenc->context->channels, ffmpegaudenc->ffmpeg_layout);
    ffmpegaudenc->needs_reorder =
        (memcmp (ffmpegaudenc->ffmpeg_layout, info->position,
            sizeof (GstAudioChannelPosition) *
            ffmpegaudenc->context->channels) != 0);
  }

  /* open codec */
  if (gst_ffmpeg_avcodec_open (ffmpegaudenc->context, oclass->in_plugin) < 0) {
    if (ffmpegaudenc->context->priv_data)
      gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
    GST_DEBUG_OBJECT (ffmpegaudenc, "avenc_%s: Failed to open FFMPEG codec",
        oclass->in_plugin->name);
    return FALSE;
  }

  /* some codecs support more than one format, first auto-choose one */
  GST_DEBUG_OBJECT (ffmpegaudenc, "picking an output format ...");
  allowed_caps = gst_pad_get_allowed_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
  if (!allowed_caps) {
    GST_DEBUG_OBJECT (ffmpegaudenc, "... but no peer, using template caps");
    /* we need to copy because get_allowed_caps returns a ref, and
     * get_pad_template_caps doesn't */
    allowed_caps =
        gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SRC_PAD (encoder));
  }
  GST_DEBUG_OBJECT (ffmpegaudenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
  gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
      oclass->in_plugin->type, allowed_caps, ffmpegaudenc->context);

  /* try to set this caps on the other side */
  other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
      ffmpegaudenc->context, TRUE);

  if (!other_caps) {
    gst_caps_unref (allowed_caps);
    gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
    GST_DEBUG ("Unsupported codec - no caps found");
    return FALSE;
  }

  icaps = gst_caps_intersect (allowed_caps, other_caps);
  gst_caps_unref (allowed_caps);
  gst_caps_unref (other_caps);
  if (gst_caps_is_empty (icaps)) {
    gst_caps_unref (icaps);
    return FALSE;
  }
  icaps = gst_caps_truncate (icaps);

  if (!gst_audio_encoder_set_output_format (GST_AUDIO_ENCODER (ffmpegaudenc),
          icaps)) {
    gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
    gst_caps_unref (icaps);
    return FALSE;
  }
  gst_caps_unref (icaps);

  /* tell the base class how many samples each handle_frame() call should
   * receive: a fixed codec frame_size means exactly that many, while 0 below
   * means no restriction */
  frame_size = ffmpegaudenc->context->frame_size;
  if (frame_size > 1) {
    gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
        frame_size);
    gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
        frame_size);
    gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 1);
  } else {
    gst_audio_encoder_set_frame_samples_min (GST_AUDIO_ENCODER (ffmpegaudenc),
        0);
    gst_audio_encoder_set_frame_samples_max (GST_AUDIO_ENCODER (ffmpegaudenc),
        0);
    gst_audio_encoder_set_frame_max (GST_AUDIO_ENCODER (ffmpegaudenc), 0);
  }

  /* success! */
  ffmpegaudenc->opened = TRUE;

  return TRUE;
}
Beispiel #25
0
static GstFlowReturn
gst_speex_enc_encode (GstSpeexEnc * enc, GstBuffer * buf)
{
  gint frame_size = enc->frame_size;
  gint bytes = frame_size * 2 * enc->channels, samples;
  gint outsize, written, dtx_ret = 0;
  GstMapInfo map;
  guint8 *data, *data0 = NULL, *bdata;
  gsize bsize, size;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;

  if (G_LIKELY (buf)) {
    gst_buffer_map (buf, &map, GST_MAP_READ);
    bdata = map.data;
    bsize = map.size;

    if (G_UNLIKELY (bsize % bytes)) {
      GST_DEBUG_OBJECT (enc, "draining; adding silence samples");

      size = ((bsize / bytes) + 1) * bytes;
      data0 = data = g_malloc0 (size);
      memcpy (data, bdata, bsize);
      gst_buffer_unmap (buf, &map);
      bdata = NULL;
    } else {
      data = bdata;
      size = bsize;
    }
  } else {
    GST_DEBUG_OBJECT (enc, "nothing to drain");
    goto done;
  }

  samples = size / (2 * enc->channels);
  speex_bits_reset (&enc->bits);

  /* FIXME what about dropped samples if DTX enabled ?? */

  while (size) {
    GST_DEBUG_OBJECT (enc, "encoding %d samples (%d bytes)", frame_size, bytes);

    if (enc->channels == 2) {
      speex_encode_stereo_int ((gint16 *) data, frame_size, &enc->bits);
    }
    dtx_ret += speex_encode_int (enc->state, (gint16 *) data, &enc->bits);

    data += bytes;
    size -= bytes;
  }

  speex_bits_insert_terminator (&enc->bits);
  outsize = speex_bits_nbytes (&enc->bits);

  if (bdata)
    gst_buffer_unmap (buf, &map);

#if 0
  ret = gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc),
      GST_BUFFER_OFFSET_NONE, outsize,
      GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (enc)), &outbuf);

  if ((GST_FLOW_OK != ret))
    goto done;
#endif
  outbuf = gst_buffer_new_allocate (NULL, outsize, NULL);
  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);

  written = speex_bits_write (&enc->bits, (gchar *) map.data, outsize);

  if (G_UNLIKELY (written < outsize)) {
    GST_ERROR_OBJECT (enc, "short write: %d < %d bytes", written, outsize);
  } else if (G_UNLIKELY (written > outsize)) {
    GST_ERROR_OBJECT (enc, "overrun: %d > %d bytes", written, outsize);
    written = outsize;
  }
  gst_buffer_unmap (outbuf, &map);
  gst_buffer_resize (outbuf, 0, written);

  if (!dtx_ret)
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);

  ret = gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (enc),
      outbuf, samples);

done:
  g_free (data0);
  return ret;
}