static GstFlowReturn
gst_atdec_handle_frame (GstAudioDecoder * decoder, GstBuffer * buffer)
{
  AudioTimeStamp timestamp = { 0 };
  AudioStreamPacketDescription packet;
  AudioQueueBufferRef input_buffer, output_buffer;
  GstBuffer *out;
  GstMapInfo info;
  GstAudioInfo *audio_info;
  int size, out_frames;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstATDec *atdec = GST_ATDEC (decoder);

  if (buffer == NULL)
    return GST_FLOW_OK;

  audio_info = gst_audio_decoder_get_audio_info (decoder);

  /* copy the input buffer into an AudioQueueBuffer */
  size = gst_buffer_get_size (buffer);
  AudioQueueAllocateBuffer (atdec->queue, size, &input_buffer);
  gst_buffer_extract (buffer, 0, input_buffer->mAudioData, size);
  input_buffer->mAudioDataByteSize = size;

  /* assume framed input */
  packet.mStartOffset = 0;
  packet.mVariableFramesInPacket = 1;
  packet.mDataByteSize = size;

  /* enqueue the buffer. It will get free'd once the gst_atdec_buffer_emptied
   * callback is called */
  AudioQueueEnqueueBuffer (atdec->queue, input_buffer, 1, &packet);

  /* figure out how many frames we need to pull out of the queue */
  out_frames = GST_CLOCK_TIME_TO_FRAMES (GST_BUFFER_DURATION (buffer),
      audio_info->rate);
  size = out_frames * audio_info->bpf;
  AudioQueueAllocateBuffer (atdec->queue, size, &output_buffer);

  /* pull the frames */
  AudioQueueOfflineRender (atdec->queue, &timestamp, output_buffer, out_frames);

  if (output_buffer->mAudioDataByteSize) {
    out =
        gst_audio_decoder_allocate_output_buffer (decoder,
        output_buffer->mAudioDataByteSize);

    gst_buffer_map (out, &info, GST_MAP_WRITE);
    memcpy (info.data, output_buffer->mAudioData,
        output_buffer->mAudioDataByteSize);
    gst_buffer_unmap (out, &info);

    flow_ret = gst_audio_decoder_finish_frame (decoder, out, 1);
  }

  AudioQueueFreeBuffer (atdec->queue, output_buffer);

  return flow_ret;
}
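/* A minimal sketch (not taken from the snippet above) of what the
 * gst_atdec_buffer_emptied callback referenced in the enqueue comment could
 * look like: the AudioQueue hands the consumed input buffer back once it has
 * been decoded, and all that is left to do is return it to the queue's
 * allocator. The signature follows Apple's AudioQueueOutputCallback; the
 * user_data argument is unused here. */
static void
gst_atdec_buffer_emptied (void *user_data, AudioQueueRef queue,
    AudioQueueBufferRef buffer)
{
  /* release the AudioQueueBuffer allocated in gst_atdec_handle_frame */
  AudioQueueFreeBuffer (queue, buffer);
}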
/* called when ffmpeg wants us to allocate a buffer to write the decoded frame
 * into. We try to give it memory from our pool */
static int
gst_ffmpegauddec_get_buffer (AVCodecContext * context, AVFrame * frame)
{
  GstFFMpegAudDec *ffmpegdec;
  GstAudioInfo *info;
  BufferInfo *buffer_info;

  ffmpegdec = (GstFFMpegAudDec *) context->opaque;

  if (G_UNLIKELY (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)))
    goto negotiate_failed;

  /* Always use the default allocator for planar audio formats because
   * we will have to copy and deinterleave later anyway */
  if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt))
    goto fallback;

  info = gst_audio_decoder_get_audio_info (GST_AUDIO_DECODER (ffmpegdec));

  buffer_info = g_slice_new (BufferInfo);
  buffer_info->buffer =
      gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (ffmpegdec),
      frame->nb_samples * info->bpf);
  gst_buffer_map (buffer_info->buffer, &buffer_info->map, GST_MAP_WRITE);
  frame->opaque = buffer_info;
  frame->data[0] = buffer_info->map.data;
  frame->extended_data = frame->data;
  frame->linesize[0] = buffer_info->map.size;
  frame->type = FF_BUFFER_TYPE_USER;

  return 0;

  /* fallbacks */
negotiate_failed:
  {
    GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
    goto fallback;
  }
fallback:
  {
    return avcodec_default_get_buffer (context, frame);
  }
}
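/* A hypothetical sketch (assumed, not part of the original file) of the
 * consuming side of the get_buffer above: once the decoder has filled the
 * frame, the BufferInfo stashed in frame->opaque lets us hand the
 * already-allocated GstBuffer downstream without another copy. The helper
 * name gst_ffmpegauddec_take_frame is illustrative only. */
static GstBuffer *
gst_ffmpegauddec_take_frame (GstFFMpegAudDec * ffmpegdec, AVFrame * frame)
{
  BufferInfo *buffer_info = frame->opaque;
  GstBuffer *outbuf;

  if (buffer_info == NULL)
    return NULL;                /* frame came from the default allocator */

  /* stop writing through the mapping and take ownership of the buffer */
  gst_buffer_unmap (buffer_info->buffer, &buffer_info->map);
  outbuf = buffer_info->buffer;
  g_slice_free (BufferInfo, buffer_info);
  frame->opaque = NULL;

  return outbuf;
}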
static void
gst_droidadec_data_available (void *data, DroidMediaCodecData * encoded)
{
  GstFlowReturn flow_ret;
  GstDroidADec *dec = (GstDroidADec *) data;
  GstAudioDecoder *decoder = GST_AUDIO_DECODER (dec);
  GstBuffer *out;
  GstMapInfo info;

  GST_DEBUG_OBJECT (dec, "data available of size %d", encoded->data.size);

  GST_AUDIO_DECODER_STREAM_LOCK (decoder);

  if (G_UNLIKELY (dec->downstream_flow_ret != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "not handling data in error state: %s",
        gst_flow_get_name (dec->downstream_flow_ret));
    flow_ret = dec->downstream_flow_ret;
    gst_audio_decoder_finish_frame (decoder, NULL, 1);
    goto out;
  }

  if (G_UNLIKELY (gst_audio_decoder_get_audio_info (GST_AUDIO_DECODER
              (dec))->finfo->format == GST_AUDIO_FORMAT_UNKNOWN)) {
    DroidMediaCodecMetaData md;
    DroidMediaRect crop;        /* TODO: get rid of that */
    GstAudioInfo info;

    memset (&md, 0x0, sizeof (md));
    droid_media_codec_get_output_info (dec->codec, &md, &crop);

    GST_INFO_OBJECT (dec, "output rate=%d, output channels=%d", md.sample_rate,
        md.channels);

    gst_audio_info_init (&info);
    gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16, md.sample_rate,
        md.channels, NULL);

    if (!gst_audio_decoder_set_output_format (decoder, &info)) {
      flow_ret = GST_FLOW_ERROR;
      goto out;
    }

    dec->info = gst_audio_decoder_get_audio_info (GST_AUDIO_DECODER (dec));
  }

  out = gst_audio_decoder_allocate_output_buffer (decoder, encoded->data.size);

  gst_buffer_map (out, &info, GST_MAP_READWRITE);
  orc_memcpy (info.data, encoded->data.data, encoded->data.size);
  gst_buffer_unmap (out, &info);

  //  GST_WARNING_OBJECT (dec, "bpf %d, bps %d", dec->info->bpf, GST_AUDIO_INFO_BPS (dec->info));

  if (dec->spf == -1 || (encoded->data.size == dec->spf * dec->info->bpf
          && gst_adapter_available (dec->adapter) == 0)) {
    /* fast path. no need for anything */
    goto push;
  }

  gst_adapter_push (dec->adapter, out);

  if (gst_adapter_available (dec->adapter) >= dec->spf * dec->info->bpf) {
    out = gst_adapter_take_buffer (dec->adapter, dec->spf * dec->info->bpf);
  } else {
    flow_ret = GST_FLOW_OK;
    goto out;
  }

push:
  GST_DEBUG_OBJECT (dec, "pushing %d bytes out", gst_buffer_get_size (out));

  flow_ret = gst_audio_decoder_finish_frame (decoder, out, 1);

  if (flow_ret == GST_FLOW_OK || flow_ret == GST_FLOW_FLUSHING) {
    goto out;
  } else if (flow_ret == GST_FLOW_EOS) {
    GST_INFO_OBJECT (dec, "eos");
  } else if (flow_ret < GST_FLOW_OK) {
    GST_ELEMENT_ERROR (dec, STREAM, FAILED,
        ("Internal data stream error."),
        ("stream stopped, reason %s", gst_flow_get_name (flow_ret)));
  }

out:
  dec->downstream_flow_ret = flow_ret;
  GST_AUDIO_DECODER_STREAM_UNLOCK (decoder);
}
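/* A hypothetical drain helper (assumed, not part of the original file) showing
 * how the samples left over in dec->adapter by the function above could be
 * flushed, e.g. on EOS: whatever remains, even a partial spf * bpf chunk, is
 * taken out of the adapter and pushed downstream as a final frame. The helper
 * name gst_droidadec_drain_adapter is illustrative only. */
static GstFlowReturn
gst_droidadec_drain_adapter (GstDroidADec * dec)
{
  GstAudioDecoder *decoder = GST_AUDIO_DECODER (dec);
  gsize available = gst_adapter_available (dec->adapter);
  GstBuffer *out;

  if (available == 0)
    return GST_FLOW_OK;

  out = gst_adapter_take_buffer (dec->adapter, available);
  return gst_audio_decoder_finish_frame (decoder, out, 1);
}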