static gint gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec, AVCodec * in_plugin, guint8 * data, guint size, GstBuffer ** outbuf, GstFlowReturn * ret) { gint len = -1; gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE; AVPacket packet; AVFrame frame; GST_DEBUG_OBJECT (ffmpegdec, "size: %d", size); gst_avpacket_init (&packet, data, size); memset (&frame, 0, sizeof (frame)); avcodec_get_frame_defaults (&frame); len = avcodec_decode_audio4 (ffmpegdec->context, &frame, &have_data, &packet); GST_DEBUG_OBJECT (ffmpegdec, "Decode audio: len=%d, have_data=%d", len, have_data); if (len >= 0 && have_data > 0) { BufferInfo *buffer_info = frame.opaque; gint nsamples, channels, byte_per_sample; gsize output_size; if (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)) { *outbuf = NULL; *ret = GST_FLOW_NOT_NEGOTIATED; len = -1; goto beach; } channels = ffmpegdec->info.channels; nsamples = frame.nb_samples; byte_per_sample = ffmpegdec->info.finfo->width / 8; /* frame.linesize[0] might contain padding, allocate only what's needed */ output_size = nsamples * byte_per_sample * channels; GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer"); if (buffer_info) { *outbuf = buffer_info->buffer; gst_buffer_unmap (buffer_info->buffer, &buffer_info->map); g_slice_free (BufferInfo, buffer_info); frame.opaque = NULL; } else if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt) && channels > 1) { gint i, j; GstMapInfo minfo; /* note: linesize[0] might contain padding, allocate only what's needed */ *outbuf = gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (ffmpegdec), output_size); gst_buffer_map (*outbuf, &minfo, GST_MAP_WRITE); switch (ffmpegdec->info.finfo->width) { case 8:{ guint8 *odata = minfo.data; for (i = 0; i < nsamples; i++) { for (j = 0; j < channels; j++) { odata[j] = ((const guint8 *) frame.extended_data[j])[i]; } odata += channels; } break; } case 16:{ guint16 *odata = (guint16 *) minfo.data; for (i = 0; i < nsamples; i++) { for (j = 0; j < channels; j++) { 
odata[j] = ((const guint16 *) frame.extended_data[j])[i]; } odata += channels; } break; } case 32:{ guint32 *odata = (guint32 *) minfo.data; for (i = 0; i < nsamples; i++) { for (j = 0; j < channels; j++) { odata[j] = ((const guint32 *) frame.extended_data[j])[i]; } odata += channels; } break; } case 64:{ guint64 *odata = (guint64 *) minfo.data; for (i = 0; i < nsamples; i++) { for (j = 0; j < channels; j++) { odata[j] = ((const guint64 *) frame.extended_data[j])[i]; } odata += channels; } break; } default: g_assert_not_reached (); break; } gst_buffer_unmap (*outbuf, &minfo); } else { *outbuf = gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (ffmpegdec), output_size); gst_buffer_fill (*outbuf, 0, frame.data[0], output_size); } GST_DEBUG_OBJECT (ffmpegdec, "Buffer created. Size: %d", have_data); /* Reorder channels to the GStreamer channel order */ if (ffmpegdec->needs_reorder) { *outbuf = gst_buffer_make_writable (*outbuf); gst_audio_buffer_reorder_channels (*outbuf, ffmpegdec->info.finfo->format, ffmpegdec->info.channels, ffmpegdec->ffmpeg_layout, ffmpegdec->info.position); } } else { *outbuf = NULL; } beach: GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d", *ret, *outbuf, len); return len; }
/* gst_ffmpegauddec_audio_frame:
 * @ffmpegdec: the decoder instance; owns the persistent ffmpegdec->frame
 *     that libav decodes into
 * @in_plugin: the codec description (unused here; kept for the caller's
 *     convention)
 * @data: encoded input bytes
 * @size: number of encoded input bytes
 * @have_data: (out): got-frame flag from avcodec_decode_audio4()
 * @outbuf: (out): decoded output buffer, or NULL when nothing was produced
 * @ret: (out): flow return, set to GST_FLOW_NOT_NEGOTIATED on caps failure
 *
 * Decodes one audio packet into ffmpegdec->frame and copies the samples
 * into a freshly allocated GstBuffer in the negotiated interleaved layout.
 * The AVFrame is always unreffed before returning, so all frame data must
 * be copied out before the `beach` label.  Returns the number of input
 * bytes consumed, or -1 on error.
 */
static gint gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec, AVCodec * in_plugin, guint8 * data, guint size, gint * have_data, GstBuffer ** outbuf, GstFlowReturn * ret) { gint len = -1; AVPacket packet; GST_DEBUG_OBJECT (ffmpegdec, "size: %d", size); gst_avpacket_init (&packet, data, size);
  /* decode into the decoder-owned persistent frame; *have_data is set
   * non-zero when a full frame came out */
  len = avcodec_decode_audio4 (ffmpegdec->context, ffmpegdec->frame, have_data, &packet); GST_DEBUG_OBJECT (ffmpegdec, "Decode audio: len=%d, have_data=%d", len, *have_data); if (len >= 0 && *have_data) { gint nsamples, channels, byte_per_sample; gsize output_size;
  /* (re)negotiate output caps from the decoded frame's format; on failure
   * report NOT_NEGOTIATED and bail out through the common cleanup path */
  if (!gst_ffmpegauddec_negotiate (ffmpegdec, ffmpegdec->context, ffmpegdec->frame, FALSE)) { *outbuf = NULL; *ret = GST_FLOW_NOT_NEGOTIATED; len = -1; goto beach; } channels = ffmpegdec->info.channels; nsamples = ffmpegdec->frame->nb_samples; byte_per_sample = ffmpegdec->info.finfo->width / 8; /* ffmpegdec->frame->linesize[0] might contain padding, allocate only what's needed */ output_size = nsamples * byte_per_sample * channels; GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
  /* planar multi-channel data must be interleaved sample-by-sample;
   * the sample width selects the copy loop below */
  if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt) && channels > 1) { gint i, j; GstMapInfo minfo; /* note: linesize[0] might contain padding, allocate only what's needed */ *outbuf = gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (ffmpegdec), output_size); gst_buffer_map (*outbuf, &minfo, GST_MAP_WRITE); switch (ffmpegdec->info.finfo->width) { case 8:{ guint8 *odata = minfo.data; for (i = 0; i < nsamples; i++) { for (j = 0; j < channels; j++) { odata[j] = ((const guint8 *) ffmpegdec->frame->extended_data[j])[i]; } odata += channels; } break; } case 16:{ guint16 *odata = (guint16 *) minfo.data; for (i = 0; i < nsamples; i++) { for (j = 0; j < channels; j++) { odata[j] = ((const guint16 *) ffmpegdec->frame->extended_data[j])[i]; } odata += channels; } break; } case 32:{ guint32 *odata = (guint32 *) minfo.data; for (i = 0; i < nsamples; i++) { for (j = 0; j < channels; j++) { odata[j] = ((const guint32 *)
  ffmpegdec->frame->extended_data[j])[i]; } odata += channels; } break; } case 64:{ guint64 *odata = (guint64 *) minfo.data; for (i = 0; i < nsamples; i++) { for (j = 0; j < channels; j++) { odata[j] = ((const guint64 *) ffmpegdec->frame->extended_data[j])[i]; } odata += channels; } break; } default: g_assert_not_reached (); break; } gst_buffer_unmap (*outbuf, &minfo); } else {
  /* packed (or mono) samples are already interleaved: single copy */
  *outbuf = gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (ffmpegdec), output_size); gst_buffer_fill (*outbuf, 0, ffmpegdec->frame->data[0], output_size); } GST_DEBUG_OBJECT (ffmpegdec, "Buffer created. Size: %" G_GSIZE_FORMAT, output_size); /* Reorder channels to the GStreamer channel order */ if (ffmpegdec->needs_reorder) { *outbuf = gst_buffer_make_writable (*outbuf); gst_audio_buffer_reorder_channels (*outbuf, ffmpegdec->info.finfo->format, ffmpegdec->info.channels, ffmpegdec->ffmpeg_layout, ffmpegdec->info.position); } /* Mark corrupted frames as corrupted */ if (ffmpegdec->frame->flags & AV_FRAME_FLAG_CORRUPT) GST_BUFFER_FLAG_SET (*outbuf, GST_BUFFER_FLAG_CORRUPTED); } else { *outbuf = NULL; } beach:
  /* drop the frame's references in every exit path; all sample data has
   * already been copied into *outbuf by this point */
  av_frame_unref (ffmpegdec->frame); GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d", *ret, *outbuf, len); return len; }