/* NOTE(review): this looks like a TRUNCATED duplicate of
 * gst_ffmpegaudenc_encode_audio() — the body stops mid-function at a
 * dangling "} else {" inside the planar channel-pointer setup, and a
 * complete version of the same function appears later in this file.
 * As written this fragment cannot compile; confirm whether it is stale
 * history that should be removed. */
static GstFlowReturn gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc, GstBuffer * buffer, gint * have_data) { GstAudioEncoder *enc; AVCodecContext *ctx; gint res; GstFlowReturn ret; GstAudioInfo *info; AVPacket *pkt; AVFrame *frame = ffmpegaudenc->frame; gboolean planar; gint nsamples = -1; enc = GST_AUDIO_ENCODER (ffmpegaudenc); ctx = ffmpegaudenc->context; pkt = g_slice_new0 (AVPacket); if (buffer != NULL) { BufferInfo *buffer_info = g_slice_new0 (BufferInfo); guint8 *audio_in; guint in_size; buffer_info->buffer = buffer; gst_buffer_map (buffer, &buffer_info->map, GST_MAP_READ); audio_in = buffer_info->map.data; in_size = buffer_info->map.size; GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer %p size:%u", audio_in, in_size); info = gst_audio_encoder_get_audio_info (enc); planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt); if (planar && info->channels > 1) { gint channels; gint i, j; nsamples = frame->nb_samples = in_size / info->bpf; channels = info->channels; frame->buf[0] = av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0); if (info->channels > AV_NUM_DATA_POINTERS) { buffer_info->ext_data_array = frame->extended_data = g_new (uint8_t *, info->channels); } else {
/* NOTE(review): another TRUNCATED variant of gst_ffmpegaudenc_encode_audio(),
 * this one against the older FFmpeg API (stack-allocated AVFrame/AVPacket and
 * avcodec_get_frame_defaults(), which was removed in later FFmpeg releases).
 * It also stops mid-function at a dangling "} else {".  Presumably leftover
 * from an older revision — confirm and remove if so. */
static GstFlowReturn gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc, guint8 * audio_in, guint in_size, gint * have_data) { GstAudioEncoder *enc; AVCodecContext *ctx; gint res; GstFlowReturn ret; GstAudioInfo *info; AVPacket pkt; AVFrame frame; gboolean planar; enc = GST_AUDIO_ENCODER (ffmpegaudenc); ctx = ffmpegaudenc->context; GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer "); memset (&pkt, 0, sizeof (pkt)); memset (&frame, 0, sizeof (frame)); avcodec_get_frame_defaults (&frame); info = gst_audio_encoder_get_audio_info (enc); planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt); if (planar && info->channels > 1) { gint channels, nsamples; gint i, j; nsamples = frame.nb_samples = in_size / info->bpf; channels = info->channels; if (info->channels > AV_NUM_DATA_POINTERS) { frame.extended_data = g_new (uint8_t *, info->channels); } else {
/* Encode one input buffer with the Fraunhofer FDK AAC encoder, or drain the
 * encoder when @inbuf is NULL, and push the produced AAC frame downstream.
 *
 * Cleanup is centralized at the "out" label: anything still mapped or owned
 * at that point is unmapped/unreffed there.
 */
static GstFlowReturn
gst_fdkaacenc_handle_frame (GstAudioEncoder * enc, GstBuffer * inbuf)
{
  GstFdkAacEnc *self = GST_FDKAACENC (enc);
  GstFlowReturn ret = GST_FLOW_OK;
  GstAudioInfo *info;
  GstMapInfo imap, omap;
  GstBuffer *outbuf;
  AACENC_BufDesc in_desc = { 0 };
  AACENC_BufDesc out_desc = { 0 };
  AACENC_InArgs in_args = { 0 };
  AACENC_OutArgs out_args = { 0 };
  gint in_id = IN_AUDIO_DATA, out_id = OUT_BITSTREAM_DATA;
  gint in_sizes, out_sizes;
  gint in_el_sizes, out_el_sizes;
  AACENC_ERROR err;

  info = gst_audio_encoder_get_audio_info (enc);

  if (inbuf) {
    if (self->need_reorder) {
      /* The encoder expects its own (AAC) channel order; reorder in place on
       * a private copy so the caller's buffer is left untouched.  The copy is
       * unreffed again below / at "out". */
      inbuf = gst_buffer_copy (inbuf);
      gst_buffer_map (inbuf, &imap, GST_MAP_READWRITE);
      gst_audio_reorder_channels (imap.data, imap.size,
          GST_AUDIO_INFO_FORMAT (info), GST_AUDIO_INFO_CHANNELS (info),
          &GST_AUDIO_INFO_POSITION (info, 0), self->aac_positions);
    } else {
      gst_buffer_map (inbuf, &imap, GST_MAP_READ);
    }

    in_args.numInSamples = imap.size / GST_AUDIO_INFO_BPS (info);
    in_sizes = imap.size;
    in_el_sizes = GST_AUDIO_INFO_BPS (info);
    in_desc.numBufs = 1;
  } else {
    /* Draining: -1 input samples tells aacEncEncode() to flush. */
    in_args.numInSamples = -1;
    in_sizes = 0;
    in_el_sizes = 0;
    in_desc.numBufs = 0;
  }

  /* Note: &imap.data is passed even in the drain case; numBufs == 0 then, so
   * the (uninitialized) pointer is not dereferenced by the encoder. */
  in_desc.bufferIdentifiers = &in_id;
  in_desc.bufs = (void *) &imap.data;
  in_desc.bufSizes = &in_sizes;
  in_desc.bufElSizes = &in_el_sizes;

  outbuf = gst_audio_encoder_allocate_output_buffer (enc, self->outbuf_size);
  if (!outbuf) {
    ret = GST_FLOW_ERROR;
    goto out;
  }

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_sizes = omap.size;
  out_el_sizes = 1;
  out_desc.bufferIdentifiers = &out_id;
  out_desc.numBufs = 1;
  out_desc.bufs = (void *) &omap.data;
  out_desc.bufSizes = &out_sizes;
  out_desc.bufElSizes = &out_el_sizes;

  err = aacEncEncode (self->enc, &in_desc, &out_desc, &in_args, &out_args);
  if (err == AACENC_ENCODE_EOF && !inbuf)
    /* Expected end-of-stream while draining: nothing more to output. */
    goto out;
  else if (err != AACENC_OK) {
    GST_ERROR_OBJECT (self, "Failed to encode data: %d", err);
    ret = GST_FLOW_ERROR;
    goto out;
  }

  if (inbuf) {
    gst_buffer_unmap (inbuf, &imap);
    if (self->need_reorder)
      gst_buffer_unref (inbuf);   /* drop the private copy made above */
    inbuf = NULL;
  }

  if (!out_args.numOutBytes)
    goto out;

  /* Trim the output buffer to what the encoder actually produced and hand it
   * (ownership included) to the base class. */
  gst_buffer_unmap (outbuf, &omap);
  gst_buffer_set_size (outbuf, out_args.numOutBytes);
  ret = gst_audio_encoder_finish_frame (enc, outbuf, self->samples_per_frame);
  outbuf = NULL;

out:
  if (outbuf) {
    gst_buffer_unmap (outbuf, &omap);
    gst_buffer_unref (outbuf);
  }
  if (inbuf) {
    gst_buffer_unmap (inbuf, &imap);
    if (self->need_reorder)
      gst_buffer_unref (inbuf);
  }
  return ret;
}
/* Source-pad task function: pulls encoded buffers from the OMX output port,
 * (re)negotiates src caps when the component reports a port-settings change,
 * forwards codec-config buffers as "codec_data" in the caps, and pushes
 * encoded audio downstream via gst_audio_encoder_finish_frame().
 *
 * All error paths pause the task, push EOS where appropriate, and record the
 * failure in self->downstream_flow_ret.
 *
 * Fix vs. previous revision: corrected the misspelled error message in
 * release_error ("relase" -> "release").
 */
static void
gst_omx_audio_enc_loop (GstOMXAudioEnc * self)
{
  GstOMXAudioEncClass *klass;
  GstOMXPort *port = self->enc_out_port;
  GstOMXBuffer *buf = NULL;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstOMXAcquireBufferReturn acq_return;
  OMX_ERRORTYPE err;

  klass = GST_OMX_AUDIO_ENC_GET_CLASS (self);

  acq_return = gst_omx_port_acquire_buffer (port, &buf);
  if (acq_return == GST_OMX_ACQUIRE_BUFFER_ERROR) {
    goto component_error;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_FLUSHING) {
    goto flushing;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_EOS) {
    goto eos;
  }

  if (!gst_pad_has_current_caps (GST_AUDIO_ENCODER_SRC_PAD (self))
      || acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
    GstAudioInfo *info =
        gst_audio_encoder_get_audio_info (GST_AUDIO_ENCODER (self));
    GstCaps *caps;

    GST_DEBUG_OBJECT (self, "Port settings have changed, updating caps");

    /* Reallocate all buffers */
    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
      err = gst_omx_port_set_enabled (port, FALSE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_buffers_released (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_deallocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 1 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    GST_AUDIO_ENCODER_STREAM_LOCK (self);

    caps = klass->get_caps (self, self->enc_out_port, info);
    if (!caps) {
      if (buf)
        gst_omx_port_release_buffer (self->enc_out_port, buf);
      GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
      goto caps_failed;
    }

    GST_DEBUG_OBJECT (self, "Setting output caps: %" GST_PTR_FORMAT, caps);

    if (!gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (self), caps)) {
      gst_caps_unref (caps);
      if (buf)
        gst_omx_port_release_buffer (self->enc_out_port, buf);
      GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
      goto caps_failed;
    }
    gst_caps_unref (caps);

    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);

    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
      err = gst_omx_port_set_enabled (port, TRUE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_allocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_populate (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_mark_reconfigured (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    /* Now get a buffer */
    if (acq_return != GST_OMX_ACQUIRE_BUFFER_OK) {
      return;
    }
  }

  g_assert (acq_return == GST_OMX_ACQUIRE_BUFFER_OK);

  if (!buf) {
    /* Only legal with the no-empty-EOS-buffer hack: treat as EOS.
     * NOTE(review): this path takes the stream lock before jumping to "eos",
     * which takes it again — the (recursive) lock count looks unbalanced on
     * the flow_error exit; confirm against the base-class locking rules. */
    g_assert ((klass->cdata.hacks & GST_OMX_HACK_NO_EMPTY_EOS_BUFFER));
    GST_AUDIO_ENCODER_STREAM_LOCK (self);
    goto eos;
  }

  GST_DEBUG_OBJECT (self, "Handling buffer: 0x%08x %" G_GUINT64_FORMAT,
      (guint) buf->omx_buf->nFlags, (guint64) buf->omx_buf->nTimeStamp);

  /* This prevents a deadlock between the srcpad stream
   * lock and the videocodec stream lock, if ::reset()
   * is called at the wrong time
   */
  if (gst_omx_port_is_flushing (self->enc_out_port)) {
    GST_DEBUG_OBJECT (self, "Flushing");
    gst_omx_port_release_buffer (self->enc_out_port, buf);
    goto flushing;
  }

  GST_AUDIO_ENCODER_STREAM_LOCK (self);

  if ((buf->omx_buf->nFlags & OMX_BUFFERFLAG_CODECCONFIG)
      && buf->omx_buf->nFilledLen > 0) {
    GstCaps *caps;
    GstBuffer *codec_data;
    GstMapInfo map = GST_MAP_INFO_INIT;

    /* Codec-config payload: copy it into a buffer and attach it to the src
     * caps as "codec_data" instead of pushing it downstream as data. */
    GST_DEBUG_OBJECT (self, "Handling codec data");
    caps =
        gst_caps_copy (gst_pad_get_current_caps (GST_AUDIO_ENCODER_SRC_PAD
            (self)));
    codec_data = gst_buffer_new_and_alloc (buf->omx_buf->nFilledLen);

    gst_buffer_map (codec_data, &map, GST_MAP_WRITE);
    memcpy (map.data,
        buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
        buf->omx_buf->nFilledLen);
    gst_buffer_unmap (codec_data, &map);

    gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, codec_data,
        NULL);
    if (!gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (self), caps)) {
      gst_caps_unref (caps);
      if (buf)
        gst_omx_port_release_buffer (self->enc_out_port, buf);
      GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
      goto caps_failed;
    }
    gst_caps_unref (caps);
    flow_ret = GST_FLOW_OK;
  } else if (buf->omx_buf->nFilledLen > 0) {
    GstBuffer *outbuf;
    guint n_samples;

    GST_DEBUG_OBJECT (self, "Handling output data");

    n_samples =
        klass->get_num_samples (self, self->enc_out_port,
        gst_audio_encoder_get_audio_info (GST_AUDIO_ENCODER (self)), buf);

    if (buf->omx_buf->nFilledLen > 0) {
      GstMapInfo map = GST_MAP_INFO_INIT;
      outbuf = gst_buffer_new_and_alloc (buf->omx_buf->nFilledLen);

      gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
      memcpy (map.data,
          buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
          buf->omx_buf->nFilledLen);
      gst_buffer_unmap (outbuf, &map);
    } else {
      outbuf = gst_buffer_new ();
    }

    /* OMX timestamps are in OMX_TICKS_PER_SECOND units; rescale to ns. */
    GST_BUFFER_TIMESTAMP (outbuf) =
        gst_util_uint64_scale (buf->omx_buf->nTimeStamp, GST_SECOND,
        OMX_TICKS_PER_SECOND);
    if (buf->omx_buf->nTickCount != 0)
      GST_BUFFER_DURATION (outbuf) =
          gst_util_uint64_scale (buf->omx_buf->nTickCount, GST_SECOND,
          OMX_TICKS_PER_SECOND);

    flow_ret =
        gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (self), outbuf,
        n_samples);
  }

  GST_DEBUG_OBJECT (self, "Handled output data");

  GST_DEBUG_OBJECT (self, "Finished frame: %s", gst_flow_get_name (flow_ret));

  err = gst_omx_port_release_buffer (port, buf);
  if (err != OMX_ErrorNone)
    goto release_error;

  self->downstream_flow_ret = flow_ret;

  if (flow_ret != GST_FLOW_OK)
    goto flow_error;

  GST_AUDIO_ENCODER_STREAM_UNLOCK (self);

  return;

component_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("OpenMAX component in error state %s (0x%08x)",
            gst_omx_component_get_last_error_string (self->enc),
            gst_omx_component_get_last_error (self->enc)));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    return;
  }
flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_FLUSHING;
    self->started = FALSE;
    return;
  }
eos:
  {
    g_mutex_lock (&self->drain_lock);
    if (self->draining) {
      GST_DEBUG_OBJECT (self, "Drained");
      self->draining = FALSE;
      g_cond_broadcast (&self->drain_cond);
      flow_ret = GST_FLOW_OK;
      gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    } else {
      GST_DEBUG_OBJECT (self, "Component signalled EOS");
      flow_ret = GST_FLOW_EOS;
    }
    g_mutex_unlock (&self->drain_lock);

    GST_AUDIO_ENCODER_STREAM_LOCK (self);
    self->downstream_flow_ret = flow_ret;

    /* Here we fallback and pause the task for the EOS case */
    if (flow_ret != GST_FLOW_OK)
      goto flow_error;

    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);

    return;
  }
flow_error:
  {
    if (flow_ret == GST_FLOW_EOS) {
      GST_DEBUG_OBJECT (self, "EOS");

      gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    } else if (flow_ret == GST_FLOW_NOT_LINKED || flow_ret < GST_FLOW_EOS) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, ("Internal data stream error."),
          ("stream stopped, reason %s", gst_flow_get_name (flow_ret)));

      gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    }
    self->started = FALSE;
    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
    return;
  }
reconfigure_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Unable to reconfigure output port"));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    return;
  }
caps_failed:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to set caps"));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    return;
  }
release_error:
  {
    /* Fixed typo: "relase" -> "release" in the user-visible message. */
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to release output buffer to component: %s (0x%08x)",
            gst_omx_error_to_string (err), err));
    gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    GST_AUDIO_ENCODER_STREAM_UNLOCK (self);
    return;
  }
}
/* Feed one GstBuffer to the FFmpeg audio encoder (or drain it when @buffer
 * is NULL) and push any produced packet downstream.
 *
 * Ownership notes (grounded in the code below):
 *  - @buffer is consumed: either unreffed here (planar multi-channel path,
 *    after de-interleaving into a private copy) or kept mapped inside a
 *    BufferInfo that is released by buffer_info_free() when the AVFrame's
 *    AVBufferRef is unreffed.
 *  - On success with output, the AVPacket and its data are handed to the
 *    outgoing GstBuffer and freed by gst_ffmpegaudenc_free_avpacket();
 *    otherwise the packet slice is freed here.
 *
 * Returns GST_FLOW_OK even on encode errors (the error is only logged).
 */
static GstFlowReturn
gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
    GstBuffer * buffer, gint * have_data)
{
  GstAudioEncoder *enc;
  AVCodecContext *ctx;
  gint res;
  GstFlowReturn ret;
  GstAudioInfo *info;
  AVPacket *pkt;
  AVFrame *frame = ffmpegaudenc->frame;
  gboolean planar;
  gint nsamples = -1;

  enc = GST_AUDIO_ENCODER (ffmpegaudenc);
  ctx = ffmpegaudenc->context;

  pkt = g_slice_new0 (AVPacket);

  if (buffer != NULL) {
    BufferInfo *buffer_info = g_slice_new0 (BufferInfo);
    guint8 *audio_in;
    guint in_size;

    buffer_info->buffer = buffer;
    gst_buffer_map (buffer, &buffer_info->map, GST_MAP_READ);
    audio_in = buffer_info->map.data;
    in_size = buffer_info->map.size;

    GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer %p size:%u", audio_in,
        in_size);

    info = gst_audio_encoder_get_audio_info (enc);
    planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt);

    /* Describe the frame with the encoder's negotiated parameters. */
    frame->format = ffmpegaudenc->context->sample_fmt;
    frame->sample_rate = ffmpegaudenc->context->sample_rate;
    frame->channels = ffmpegaudenc->context->channels;
    frame->channel_layout = ffmpegaudenc->context->channel_layout;

    if (planar && info->channels > 1) {
      /* Planar format with >1 channel: GStreamer delivers interleaved audio,
       * so de-interleave into a freshly allocated per-channel layout. */
      gint channels;
      gint i, j;

      nsamples = frame->nb_samples = in_size / info->bpf;
      channels = info->channels;

      /* Dummy AVBufferRef whose free callback releases buffer_info (map,
       * ext_data, ext_data_array) when the frame is unreffed. */
      frame->buf[0] =
          av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0);

      if (info->channels > AV_NUM_DATA_POINTERS) {
        /* More channels than fit in frame->data[]: need a separate
         * extended_data array (freed via buffer_info). */
        buffer_info->ext_data_array = frame->extended_data =
            av_malloc_array (info->channels, sizeof (uint8_t *));
      } else {
        frame->extended_data = frame->data;
      }

      /* One contiguous allocation, split into per-channel planes. */
      buffer_info->ext_data = frame->extended_data[0] = av_malloc (in_size);
      frame->linesize[0] = in_size / channels;
      for (i = 1; i < channels; i++)
        frame->extended_data[i] =
            frame->extended_data[i - 1] + frame->linesize[0];

      /* De-interleave by sample width (bits per sample from the format). */
      switch (info->finfo->width) {
        case 8:{
          const guint8 *idata = (const guint8 *) audio_in;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              ((guint8 *) frame->extended_data[j])[i] = idata[j];
            }
            idata += channels;
          }
          break;
        }
        case 16:{
          const guint16 *idata = (const guint16 *) audio_in;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              ((guint16 *) frame->extended_data[j])[i] = idata[j];
            }
            idata += channels;
          }
          break;
        }
        case 32:{
          const guint32 *idata = (const guint32 *) audio_in;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              ((guint32 *) frame->extended_data[j])[i] = idata[j];
            }
            idata += channels;
          }
          break;
        }
        case 64:{
          const guint64 *idata = (const guint64 *) audio_in;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              ((guint64 *) frame->extended_data[j])[i] = idata[j];
            }
            idata += channels;
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }

      /* The samples were copied out, so the input buffer can go already. */
      gst_buffer_unmap (buffer, &buffer_info->map);
      gst_buffer_unref (buffer);
      buffer_info->buffer = NULL;
    } else {
      /* Interleaved (or mono): point the frame straight at the mapped input;
       * buffer stays mapped until buffer_info_free() runs. */
      frame->data[0] = audio_in;
      frame->extended_data = frame->data;
      frame->linesize[0] = in_size;
      frame->nb_samples = nsamples = in_size / info->bpf;
      frame->buf[0] =
          av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0);
    }

    /* we have a frame to feed the encoder */
    res = avcodec_encode_audio2 (ctx, pkt, frame, have_data);

    av_frame_unref (frame);
  } else {
    GST_LOG_OBJECT (ffmpegaudenc, "draining");
    /* flushing the encoder */
    res = avcodec_encode_audio2 (ctx, pkt, NULL, have_data);
  }

  if (res < 0) {
    char error_str[128] = { 0, };

    g_slice_free (AVPacket, pkt);
    av_strerror (res, error_str, sizeof (error_str));
    GST_ERROR_OBJECT (enc, "Failed to encode buffer: %d - %s", res,
        error_str);
    /* Deliberately not an error flow return: encoding continues. */
    return GST_FLOW_OK;
  }
  GST_LOG_OBJECT (ffmpegaudenc, "got output size %d", res);

  if (*have_data) {
    GstBuffer *outbuf;
    const AVCodec *codec;

    GST_LOG_OBJECT (ffmpegaudenc, "pushing size %d", pkt->size);

    /* Zero-copy wrap: pkt and pkt->data are freed by
     * gst_ffmpegaudenc_free_avpacket() when outbuf is disposed. */
    outbuf =
        gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, pkt->data,
        pkt->size, 0, pkt->size, pkt, gst_ffmpegaudenc_free_avpacket);

    codec = ffmpegaudenc->context->codec;
    if ((codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) || !buffer) {
      /* FIXME: Not really correct, as -1 means "all the samples we got
         given so far", which may not be true depending on the codec,
         but we have no way to know AFAICT */
      ret = gst_audio_encoder_finish_frame (enc, outbuf, -1);
    } else {
      ret = gst_audio_encoder_finish_frame (enc, outbuf, nsamples);
    }
  } else {
    GST_LOG_OBJECT (ffmpegaudenc, "no output produced");
    g_slice_free (AVPacket, pkt);
    ret = GST_FLOW_OK;
  }

  return ret;
}