/* Decode one framed input buffer through an AudioToolbox AudioQueue in
 * offline-render mode.
 *
 * The compressed input is copied into an AudioQueueBuffer and enqueued as a
 * single packet; the decoded PCM is then pulled back synchronously with
 * AudioQueueOfflineRender() into an output buffer sized from the input
 * buffer's duration and the negotiated rate/bpf.
 *
 * decoder: the base audio decoder (actually a GstATDec)
 * buffer:  the input frame; NULL means the base class is draining
 *
 * Returns GST_FLOW_OK, or whatever gst_audio_decoder_finish_frame() returns
 * when output was produced.
 */
static GstFlowReturn
gst_atdec_handle_frame (GstAudioDecoder * decoder, GstBuffer * buffer)
{
  AudioTimeStamp timestamp = { 0 };
  AudioStreamPacketDescription packet;
  AudioQueueBufferRef input_buffer, output_buffer;
  GstBuffer *out;
  GstMapInfo info;
  GstAudioInfo *audio_info;
  int size, out_frames;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstATDec *atdec = GST_ATDEC (decoder);

  /* NULL buffer means the base class is draining: nothing buffered here */
  if (buffer == NULL)
    return GST_FLOW_OK;

  audio_info = gst_audio_decoder_get_audio_info (decoder);

  /* copy the input buffer into an AudioQueueBuffer */
  size = gst_buffer_get_size (buffer);
  AudioQueueAllocateBuffer (atdec->queue, size, &input_buffer);
  gst_buffer_extract (buffer, 0, input_buffer->mAudioData, size);
  input_buffer->mAudioDataByteSize = size;

  /* assume framed input: exactly one packet per buffer */
  packet.mStartOffset = 0;
  packet.mVariableFramesInPacket = 1;
  packet.mDataByteSize = size;

  /* enqueue the buffer. It will get free'd once the gst_atdec_buffer_emptied
   * callback is called */
  AudioQueueEnqueueBuffer (atdec->queue, input_buffer, 1, &packet);

  /* figure out how many frames we need to pull out of the queue */
  out_frames = GST_CLOCK_TIME_TO_FRAMES (GST_BUFFER_DURATION (buffer),
      audio_info->rate);
  size = out_frames * audio_info->bpf;
  AudioQueueAllocateBuffer (atdec->queue, size, &output_buffer);

  /* pull the frames.
   * FIX: '&timestamp' had been garbled to '×tamp' (bad decoding of the
   * HTML entity '&times;'), which did not compile. */
  AudioQueueOfflineRender (atdec->queue, &timestamp, output_buffer, out_frames);

  if (output_buffer->mAudioDataByteSize) {
    out =
        gst_audio_decoder_allocate_output_buffer (decoder,
        output_buffer->mAudioDataByteSize);

    gst_buffer_map (out, &info, GST_MAP_WRITE);
    memcpy (info.data, output_buffer->mAudioData,
        output_buffer->mAudioDataByteSize);
    gst_buffer_unmap (out, &info);

    flow_ret = gst_audio_decoder_finish_frame (decoder, out, 1);
  }

  AudioQueueFreeBuffer (atdec->queue, output_buffer);

  return flow_ret;
}
/* Decode a buffer of 8-bit mu-law samples into 16-bit linear PCM.
 *
 * Each mu-law byte expands to one 16-bit sample, so the output buffer is
 * twice the input size.
 *
 * dec:    the base audio decoder
 * buffer: the input frame; NULL means the base class is draining
 *
 * Returns the result of gst_audio_decoder_finish_frame(), or GST_FLOW_ERROR
 * if either buffer cannot be mapped.
 */
static GstFlowReturn
gst_mulawdec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
{
  GstMapInfo inmap, outmap;
  gint16 *linear_data;
  guint8 *mulaw_data;
  gsize mulaw_size, linear_size;
  GstBuffer *outbuf;

  /* NULL buffer means the base class is draining: nothing buffered here */
  if (!buffer) {
    return GST_FLOW_OK;
  }

  if (!gst_buffer_map (buffer, &inmap, GST_MAP_READ)) {
    GST_ERROR ("failed to map input buffer");
    goto error_failed_map_input_buffer;
  }

  mulaw_data = inmap.data;
  mulaw_size = inmap.size;

  /* one 8-bit mu-law sample decodes to one 16-bit linear sample */
  linear_size = mulaw_size * 2;

  outbuf = gst_audio_decoder_allocate_output_buffer (dec, linear_size);

  if (!gst_buffer_map (outbuf, &outmap, GST_MAP_WRITE)) {
    /* FIX: message previously said "input" for the output buffer */
    GST_ERROR ("failed to map output buffer");
    goto error_failed_map_output_buffer;
  }

  linear_data = (gint16 *) outmap.data;
  mulaw_decode (mulaw_data, linear_data, mulaw_size);

  gst_buffer_unmap (outbuf, &outmap);
  gst_buffer_unmap (buffer, &inmap);

  return gst_audio_decoder_finish_frame (dec, outbuf, -1);

error_failed_map_output_buffer:
  gst_buffer_unref (outbuf);
  /* FIX: the input buffer was still mapped on this path — unmap it */
  gst_buffer_unmap (buffer, &inmap);
error_failed_map_input_buffer:
  return GST_FLOW_ERROR;
}
/* called when ffmpeg wants us to allocate a buffer to write the decoded frame
 * into. We try to give it memory from our pool, falling back to ffmpeg's
 * default allocator whenever we cannot (negotiation failure or planar
 * output, which would need a deinterleaving copy anyway). */
static int
gst_ffmpegauddec_get_buffer (AVCodecContext * context, AVFrame * frame)
{
  GstFFMpegAudDec *ffmpegdec = (GstFFMpegAudDec *) context->opaque;
  GstAudioInfo *info;
  BufferInfo *buffer_info;

  /* without negotiated caps we cannot size a downstream buffer */
  if (G_UNLIKELY (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE))) {
    GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
    return avcodec_default_get_buffer (context, frame);
  }

  /* Always use the default allocator for planar audio formats because
   * we will have to copy and deinterleave later anyway */
  if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt))
    return avcodec_default_get_buffer (context, frame);

  info = gst_audio_decoder_get_audio_info (GST_AUDIO_DECODER (ffmpegdec));

  /* hand ffmpeg a downstream-allocated buffer, kept mapped for writing;
   * the mapping lives in buffer_info and travels via frame->opaque */
  buffer_info = g_slice_new (BufferInfo);
  buffer_info->buffer =
      gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (ffmpegdec),
      frame->nb_samples * info->bpf);
  gst_buffer_map (buffer_info->buffer, &buffer_info->map, GST_MAP_WRITE);

  frame->opaque = buffer_info;
  frame->data[0] = buffer_info->map.data;
  frame->extended_data = frame->data;
  frame->linesize[0] = buffer_info->map.size;
  frame->type = FF_BUFFER_TYPE_USER;

  return 0;
}
/* Decode one Opus packet (or conceal a missing one) into interleaved S16 PCM
 * and push it downstream.
 *
 * Lazily creates the multistream decoder on first use (defaulting to stereo
 * at 48 kHz when no headers were seen).  With in-band FEC enabled the
 * element runs one buffer behind: the first buffer only primes last_buffer,
 * and each later call decodes the previously stored buffer, using FEC
 * reconstruction when that buffer was a hole.
 *
 * dec:    the Opus decoder element
 * buffer: input packet; NULL/empty signals a missing buffer (PLC)
 *
 * Returns GST_FLOW_OK or an error flow from decode/allocation failures.
 */
static GstFlowReturn
opus_dec_chain_parse_data (GstOpusDec * dec, GstBuffer * buffer)
{
  GstFlowReturn res = GST_FLOW_OK;
  gsize size;
  guint8 *data;
  GstBuffer *outbuf;
  gint16 *out_data;
  int n, err;
  int samples;
  unsigned int packet_size;
  GstBuffer *buf;
  GstMapInfo map, omap;

  if (dec->state == NULL) {
    /* If we did not get any headers, default to 2 channels */
    if (dec->n_channels == 0) {
      GST_INFO_OBJECT (dec, "No header, assuming single stream");
      dec->n_channels = 2;
      dec->sample_rate = 48000;
      /* default stereo mapping */
      dec->channel_mapping_family = 0;
      dec->channel_mapping[0] = 0;
      dec->channel_mapping[1] = 1;
      dec->n_streams = 1;
      dec->n_stereo_streams = 1;

      gst_opus_dec_negotiate (dec, NULL);
    }

    GST_DEBUG_OBJECT (dec, "Creating decoder with %d channels, %d Hz",
        dec->n_channels, dec->sample_rate);
#ifndef GST_DISABLE_GST_DEBUG
    gst_opus_common_log_channel_mapping_table (GST_ELEMENT (dec), opusdec_debug,
        "Mapping table", dec->n_channels, dec->channel_mapping);
#endif

    GST_DEBUG_OBJECT (dec, "%d streams, %d stereo", dec->n_streams,
        dec->n_stereo_streams);
    dec->state =
        opus_multistream_decoder_create (dec->sample_rate, dec->n_channels,
        dec->n_streams, dec->n_stereo_streams, dec->channel_mapping, &err);
    if (!dec->state || err != OPUS_OK)
      goto creation_failed;
  }

  if (buffer) {
    GST_DEBUG_OBJECT (dec, "Received buffer of size %" G_GSIZE_FORMAT,
        gst_buffer_get_size (buffer));
  } else {
    GST_DEBUG_OBJECT (dec, "Received missing buffer");
  }

  /* if using in-band FEC, we introduce one extra frame's delay as we need
     to potentially wait for next buffer to decode a missing buffer */
  if (dec->use_inband_fec && !dec->primed) {
    GST_DEBUG_OBJECT (dec, "First buffer received in FEC mode, early out");
    gst_buffer_replace (&dec->last_buffer, buffer);
    dec->primed = TRUE;
    goto done;
  }

  /* That's the buffer we'll be sending to the opus decoder.
   * NOTE(review): this dereferences dec->last_buffer when use_inband_fec is
   * set — it appears to be guaranteed non-NULL once primed; confirm that
   * gst_buffer_replace() is never given a NULL buffer on the priming path. */
  buf = (dec->use_inband_fec
      && gst_buffer_get_size (dec->last_buffer) > 0) ?
      dec->last_buffer : buffer;

  if (buf && gst_buffer_get_size (buf) > 0) {
    gst_buffer_map (buf, &map, GST_MAP_READ);
    data = map.data;
    size = map.size;
    GST_DEBUG_OBJECT (dec, "Using buffer of size %" G_GSIZE_FORMAT, size);
  } else {
    /* concealment data, pass NULL as the bits parameters */
    GST_DEBUG_OBJECT (dec, "Using NULL buffer");
    data = NULL;
    size = 0;
  }

  /* use maximum size (120 ms) as the number of returned samples is
     not constant over the stream. */
  samples = 120 * dec->sample_rate / 1000;
  packet_size = samples * dec->n_channels * 2;    /* 2 bytes per S16 sample */

  outbuf =
      gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (dec),
      packet_size);
  if (!outbuf) {
    goto buffer_failed;
  }

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_data = (gint16 *) omap.data;

  if (dec->use_inband_fec) {
    if (dec->last_buffer) {
      /* normal delayed decode */
      GST_LOG_OBJECT (dec, "FEC enabled, decoding last delayed buffer");
      n = opus_multistream_decode (dec->state, data, size, out_data, samples,
          0);
    } else {
      /* FEC reconstruction decode */
      GST_LOG_OBJECT (dec, "FEC enabled, reconstructing last buffer");
      n = opus_multistream_decode (dec->state, data, size, out_data, samples,
          1);
    }
  } else {
    /* normal decode */
    GST_LOG_OBJECT (dec, "FEC disabled, decoding buffer");
    n = opus_multistream_decode (dec->state, data, size, out_data, samples, 0);
  }

  gst_buffer_unmap (outbuf, &omap);
  if (data != NULL)
    gst_buffer_unmap (buf, &map);

  if (n < 0) {
    GST_ELEMENT_ERROR (dec, STREAM, DECODE, ("Decoding error: %d", n), (NULL));
    gst_buffer_unref (outbuf);
    return GST_FLOW_ERROR;
  }
  GST_DEBUG_OBJECT (dec, "decoded %d samples", n);
  /* shrink the output to the samples actually produced */
  gst_buffer_set_size (outbuf, n * 2 * dec->n_channels);

  /* Skip any samples that need skipping (Opus pre-skip, counted at 48 kHz) */
  if (dec->pre_skip > 0) {
    guint scaled_pre_skip = dec->pre_skip * dec->sample_rate / 48000;
    guint skip = scaled_pre_skip > n ? n : scaled_pre_skip;
    guint scaled_skip = skip * 48000 / dec->sample_rate;

    gst_buffer_resize (outbuf, skip * 2 * dec->n_channels, -1);
    dec->pre_skip -= scaled_skip;
    GST_INFO_OBJECT (dec,
        "Skipping %u samples (%u at 48000 Hz, %u left to skip)", skip,
        scaled_skip, dec->pre_skip);
  }

  if (gst_buffer_get_size (outbuf) == 0) {
    /* everything was skipped: pass NULL downstream to consume the frame */
    gst_buffer_unref (outbuf);
    outbuf = NULL;
  } else if (dec->opus_pos[0] != GST_AUDIO_CHANNEL_POSITION_INVALID) {
    gst_audio_buffer_reorder_channels (outbuf, GST_AUDIO_FORMAT_S16,
        dec->n_channels, dec->opus_pos, dec->info.position);
  }

  /* Apply gain */
  /* Would be better off leaving this to a volume element, as this is
     a naive conversion that does too many int/float conversions.
     However, we don't have control over the pipeline...
     So make it optional if the user program wants to use a volume, but
     do it by default so the correct volume goes out by default */
  if (dec->apply_gain && outbuf && dec->r128_gain) {
    gsize rsize;
    unsigned int i, nsamples;
    double volume = dec->r128_gain_volume;
    gint16 *samples;
    gst_buffer_map (outbuf, &omap, GST_MAP_READWRITE);
    samples = (gint16 *) omap.data;
    rsize = omap.size;
    GST_DEBUG_OBJECT (dec, "Applying gain: volume %f", volume);
    nsamples = rsize / 2;
    for (i = 0; i < nsamples; ++i) {
      /* clamp to the S16 range after scaling */
      int sample = (int) (samples[i] * volume + 0.5);
      samples[i] = sample < -32768 ? -32768 : sample > 32767 ? 32767 : sample;
    }
    gst_buffer_unmap (outbuf, &omap);
  }

  if (dec->use_inband_fec) {
    /* remember this buffer for the delayed decode on the next call */
    gst_buffer_replace (&dec->last_buffer, buffer);
  }

  res = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (dec), outbuf, 1);

  if (res != GST_FLOW_OK)
    GST_DEBUG_OBJECT (dec, "flow: %s", gst_flow_get_name (res));

done:
  return res;

/* ERRORS */
creation_failed:
  GST_ERROR_OBJECT (dec, "Failed to create Opus decoder: %d", err);
  return GST_FLOW_ERROR;

buffer_failed:
  GST_ERROR_OBJECT (dec, "Failed to create %u byte buffer", packet_size);
  return GST_FLOW_ERROR;
}
/* Decode one Vorbis data packet and push the resulting PCM downstream.
 *
 * Synthesizes the packet, counts the samples that became available, copies
 * (and, for Tremolo, reorders) them into a freshly allocated output buffer,
 * then consumes one frame via gst_audio_decoder_finish_frame() — also when
 * no samples were produced (out == NULL consumes the frame without output).
 *
 * vd:        the Vorbis decoder element
 * packet:    the ogg packet to decode
 * timestamp: unused here; kept for the caller's signature
 * duration:  unused here; kept for the caller's signature
 *
 * Returns GST_FLOW_OK on success, GST_FLOW_NOT_NEGOTIATED when headers are
 * missing, GST_FLOW_ERROR on decode failures.
 */
static GstFlowReturn
vorbis_handle_data_packet (GstVorbisDec * vd, ogg_packet * packet,
    GstClockTime timestamp, GstClockTime duration)
{
#ifdef USE_TREMOLO
  vorbis_sample_t *pcm;
#else
  vorbis_sample_t **pcm;
#endif
  guint sample_count;
  GstBuffer *out = NULL;
  GstFlowReturn result;
  GstMapInfo map;
  gsize size;

  if (G_UNLIKELY (!vd->initialized)) {
    result = vorbis_dec_handle_header_caps (vd);
    if (result != GST_FLOW_OK)
      goto not_initialized;
  }

  /* normal data packet */
  /* FIXME, we can skip decoding if the packet is outside of the
   * segment, this is however not very trivial as we need a previous
   * packet to decode the current one so we must be careful not to
   * throw away too much. For now we decode everything and clip right
   * before pushing data. */
#ifdef USE_TREMOLO
  if (G_UNLIKELY (vorbis_dsp_synthesis (&vd->vd, packet, 1)))
    goto could_not_read;
#else
  if (G_UNLIKELY (vorbis_synthesis (&vd->vb, packet)))
    goto could_not_read;
  if (G_UNLIKELY (vorbis_synthesis_blockin (&vd->vd, &vd->vb) < 0))
    goto not_accepted;
#endif

  /* assume all goes well here */
  result = GST_FLOW_OK;

  /* count samples ready for reading.
   * FIX: the 'goto done;' used to live inside the #else arm only, so a
   * USE_TREMOLO build attached the following 'size = ...' statement to the
   * if as its body instead of skipping on zero samples. It must sit after
   * the #endif so both builds share it. */
#ifdef USE_TREMOLO
  if ((sample_count = vorbis_dsp_pcmout (&vd->vd, NULL, 0)) == 0)
#else
  if ((sample_count = vorbis_synthesis_pcmout (&vd->vd, NULL)) == 0)
#endif
    goto done;

  size = sample_count * vd->info.bpf;
  GST_LOG_OBJECT (vd, "%d samples ready for reading, size %" G_GSIZE_FORMAT,
      sample_count, size);

  /* alloc buffer for it */
  out = gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (vd), size);

  gst_buffer_map (out, &map, GST_MAP_WRITE);

  /* get samples ready for reading now, should be sample_count */
#ifdef USE_TREMOLO
  if (G_UNLIKELY (vorbis_dsp_pcmout (&vd->vd, map.data,
              sample_count) != sample_count))
#else
  if (G_UNLIKELY (vorbis_synthesis_pcmout (&vd->vd, &pcm) != sample_count))
#endif
    goto wrong_samples;

#ifdef USE_TREMOLO
  if (vd->info.channels < 9)
    gst_audio_reorder_channels (map.data, map.size, GST_VORBIS_AUDIO_FORMAT,
        vd->info.channels,
        gst_vorbis_channel_positions[vd->info.channels - 1],
        gst_vorbis_default_channel_positions[vd->info.channels - 1]);
#else
  /* copy samples in buffer */
  vd->copy_samples ((vorbis_sample_t *) map.data, pcm, sample_count,
      vd->info.channels);
#endif

  GST_LOG_OBJECT (vd, "have output size of %" G_GSIZE_FORMAT, size);
  gst_buffer_unmap (out, &map);

done:
  /* whether or not data produced, consume one frame and advance time */
  result = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (vd), out, 1);

#ifdef USE_TREMOLO
  vorbis_dsp_read (&vd->vd, sample_count);
#else
  vorbis_synthesis_read (&vd->vd, sample_count);
#endif

  return result;

  /* ERRORS */
not_initialized:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
        (NULL), ("no header sent yet"));
    return GST_FLOW_NOT_NEGOTIATED;
  }
could_not_read:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
        (NULL), ("couldn't read data packet"));
    return GST_FLOW_ERROR;
  }
not_accepted:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
        (NULL), ("vorbis decoder did not accept data packet"));
    return GST_FLOW_ERROR;
  }
wrong_samples:
  {
    /* FIX: the buffer was still mapped here — unmap before dropping it */
    gst_buffer_unmap (out, &map);
    gst_buffer_unref (out);
    GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
        (NULL), ("vorbis decoder reported wrong number of samples"));
    return GST_FLOW_ERROR;
  }
}
/* Source-pad task: dequeue one decoded output buffer from the Android
 * MediaCodec, push it downstream (optionally reordering channels or
 * accumulating codec frames in an adapter), and handle format changes,
 * EOS/drain signalling, flushing and the various error paths.
 *
 * Runs with the decoder stream lock held except around the blocking
 * dequeue call and the drain_lock handshake.
 */
static void
gst_amc_audio_dec_loop (GstAmcAudioDec * self)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  gboolean is_eos;
  GstAmcBuffer *buf;
  GstAmcBufferInfo buffer_info;
  gint idx;
  GError *err = NULL;

  GST_AUDIO_DECODER_STREAM_LOCK (self);

retry:
  /*if (self->input_caps_changed) {
     idx = INFO_OUTPUT_FORMAT_CHANGED;
     } else { */
  GST_DEBUG_OBJECT (self, "Waiting for available output buffer");
  GST_AUDIO_DECODER_STREAM_UNLOCK (self);
  /* Wait at most 100ms here, some codecs don't fail dequeueing if
   * the codec is flushing, causing deadlocks during shutdown */
  idx =
      gst_amc_codec_dequeue_output_buffer (self->codec, &buffer_info, 100000,
      &err);
  GST_AUDIO_DECODER_STREAM_LOCK (self);
  /*} */

  if (idx < 0) {
    /* negative index is a status code, not a buffer */
    if (self->flushing) {
      g_clear_error (&err);
      goto flushing;
    }

    switch (idx) {
      case INFO_OUTPUT_BUFFERS_CHANGED:
        /* Handled internally */
        g_assert_not_reached ();
        break;
      case INFO_OUTPUT_FORMAT_CHANGED:{
        GstAmcFormat *format;
        gchar *format_string;

        GST_DEBUG_OBJECT (self, "Output format has changed");

        format = gst_amc_codec_get_output_format (self->codec, &err);
        if (!format)
          goto format_error;

        format_string = gst_amc_format_to_string (format, &err);
        if (err) {
          gst_amc_format_free (format);
          goto format_error;
        }
        GST_DEBUG_OBJECT (self, "Got new output format: %s", format_string);
        g_free (format_string);

        if (!gst_amc_audio_dec_set_src_caps (self, format)) {
          gst_amc_format_free (format);
          goto format_error;
        }
        gst_amc_format_free (format);

        goto retry;
      }
      case INFO_TRY_AGAIN_LATER:
        GST_DEBUG_OBJECT (self, "Dequeueing output buffer timed out");
        goto retry;
      case G_MININT:
        GST_ERROR_OBJECT (self, "Failure dequeueing output buffer");
        goto dequeue_error;
      default:
        g_assert_not_reached ();
        break;
    }

    goto retry;
  }

  GST_DEBUG_OBJECT (self,
      "Got output buffer at index %d: offset %d size %d time %" G_GINT64_FORMAT
      " flags 0x%08x", idx, buffer_info.offset, buffer_info.size,
      buffer_info.presentation_time_us, buffer_info.flags);

  /* normalize the flag test to a gboolean */
  is_eos = ! !(buffer_info.flags & BUFFER_FLAG_END_OF_STREAM);

  buf = gst_amc_codec_get_output_buffer (self->codec, idx, &err);
  if (!buf)
    goto failed_to_get_output_buffer;

  if (buffer_info.size > 0) {
    GstBuffer *outbuf;
    GstMapInfo minfo;

    /* This sometimes happens at EOS or if the input is not properly framed,
     * let's handle it gracefully by allocating a new buffer for the current
     * caps and filling it */

    /* output must be a whole number of sample frames */
    if (buffer_info.size % self->info.bpf != 0)
      goto invalid_buffer_size;

    outbuf =
        gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (self),
        buffer_info.size);
    if (!outbuf)
      goto failed_allocate;

    gst_buffer_map (outbuf, &minfo, GST_MAP_WRITE);
    if (self->needs_reorder) {
      /* per-sample channel remap from the codec order to the negotiated one */
      gint i, n_samples, c, n_channels;
      gint *reorder_map = self->reorder_map;
      gint16 *dest, *source;

      dest = (gint16 *) minfo.data;
      source = (gint16 *) (buf->data + buffer_info.offset);
      n_samples = buffer_info.size / self->info.bpf;
      n_channels = self->info.channels;

      for (i = 0; i < n_samples; i++) {
        for (c = 0; c < n_channels; c++) {
          dest[i * n_channels + reorder_map[c]] = source[i * n_channels + c];
        }
      }
    } else {
      orc_memcpy (minfo.data, buf->data + buffer_info.offset,
          buffer_info.size);
    }
    gst_buffer_unmap (outbuf, &minfo);

    /* spf != -1 means a fixed samples-per-frame: gather whole codec frames
     * in the adapter instead of pushing immediately */
    if (self->spf != -1) {
      gst_adapter_push (self->output_adapter, outbuf);
    } else {
      flow_ret =
          gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (self), outbuf, 1);
    }
  }

  gst_amc_buffer_free (buf);
  buf = NULL;

  if (self->spf != -1) {
    GstBuffer *outbuf;
    guint avail = gst_adapter_available (self->output_adapter);
    guint nframes;

    /* On EOS we take the complete adapter content, no matter
     * if it is a multiple of the codec frame size or not.
     * Otherwise we take a multiple of codec frames and push
     * them downstream
     */
    avail /= self->info.bpf;
    if (!is_eos) {
      nframes = avail / self->spf;
      avail = nframes * self->spf;
    } else {
      nframes = (avail + self->spf - 1) / self->spf;
    }
    avail *= self->info.bpf;

    if (avail > 0) {
      outbuf = gst_adapter_take_buffer (self->output_adapter, avail);
      flow_ret =
          gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (self), outbuf,
          nframes);
    }
  }

  if (!gst_amc_codec_release_output_buffer (self->codec, idx, &err)) {
    if (self->flushing) {
      g_clear_error (&err);
      goto flushing;
    }
    goto failed_release;
  }

  if (is_eos || flow_ret == GST_FLOW_EOS) {
    /* drop the stream lock before taking drain_lock to keep lock order */
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    if (self->draining) {
      GST_DEBUG_OBJECT (self, "Drained");
      self->draining = FALSE;
      g_cond_broadcast (&self->drain_cond);
    } else if (flow_ret == GST_FLOW_OK) {
      GST_DEBUG_OBJECT (self, "Component signalled EOS");
      flow_ret = GST_FLOW_EOS;
    }
    g_mutex_unlock (&self->drain_lock);
    GST_AUDIO_DECODER_STREAM_LOCK (self);
  } else {
    GST_DEBUG_OBJECT (self, "Finished frame: %s", gst_flow_get_name (flow_ret));
  }

  self->downstream_flow_ret = flow_ret;

  if (flow_ret != GST_FLOW_OK)
    goto flow_error;

  GST_AUDIO_DECODER_STREAM_UNLOCK (self);

  return;

  /* ERRORS: each path posts an error/EOS as appropriate, stops the task,
   * records the flow return and wakes up any thread waiting in drain */
dequeue_error:
  {
    GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }

format_error:
  {
    if (err)
      GST_ELEMENT_ERROR_FROM_ERROR (self, err);
    else
      GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
          ("Failed to handle format"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }

failed_release:
  {
    GST_AUDIO_DECODER_ERROR_FROM_ERROR (self, err);
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }

flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_FLUSHING;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

flow_error:
  {
    if (flow_ret == GST_FLOW_EOS) {
      GST_DEBUG_OBJECT (self, "EOS");
      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    } else if (flow_ret < GST_FLOW_EOS) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED,
          ("Internal data stream error."), ("stream stopped, reason %s",
              gst_flow_get_name (flow_ret)));
      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    } else if (flow_ret == GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    }
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }

failed_to_get_output_buffer:
  {
    GST_AUDIO_DECODER_ERROR_FROM_ERROR (self, err);
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }

invalid_buffer_size:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("Invalid buffer size %u (bfp %d)", buffer_info.size, self->info.bpf));
    gst_amc_codec_release_output_buffer (self->codec, idx, &err);
    if (err && !self->flushing)
      GST_ELEMENT_WARNING_FROM_ERROR (self, err);
    g_clear_error (&err);
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }

failed_allocate:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to allocate output buffer"));
    gst_amc_codec_release_output_buffer (self->codec, idx, &err);
    if (err && !self->flushing)
      GST_ELEMENT_WARNING_FROM_ERROR (self, err);
    g_clear_error (&err);
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    self->draining = FALSE;
    g_cond_broadcast (&self->drain_cond);
    g_mutex_unlock (&self->drain_lock);
    return;
  }
}
/* Decode one Opus packet (or conceal a missing one) into interleaved S16 PCM
 * and push it downstream.
 *
 * This variant additionally: bails out on negotiation failure, maps a mono
 * stream to both output channels, performs PLC with 2.5 ms-aligned durations
 * (carrying the remainder in leftover_plc_duration), grows the output buffer
 * on OPUS_BUFFER_TOO_SMALL, and honours GstAudioClippingMeta for start/end
 * sample trimming instead of a decoder-tracked pre_skip.
 *
 * dec:    the Opus decoder element
 * buffer: input packet; NULL/empty signals a missing buffer (PLC)
 *
 * Returns GST_FLOW_OK, GST_FLOW_NOT_NEGOTIATED, or an error flow.
 */
static GstFlowReturn
opus_dec_chain_parse_data (GstOpusDec * dec, GstBuffer * buffer)
{
  GstFlowReturn res = GST_FLOW_OK;
  gsize size;
  guint8 *data;
  GstBuffer *outbuf, *bufd;
  gint16 *out_data;
  int n, err;
  int samples;
  unsigned int packet_size;
  GstBuffer *buf;
  GstMapInfo map, omap;
  GstAudioClippingMeta *cmeta = NULL;

  if (dec->state == NULL) {
    /* If we did not get any headers, default to 2 channels */
    if (dec->n_channels == 0) {
      GST_INFO_OBJECT (dec, "No header, assuming single stream");
      dec->n_channels = 2;
      dec->sample_rate = 48000;
      /* default stereo mapping */
      dec->channel_mapping_family = 0;
      dec->channel_mapping[0] = 0;
      dec->channel_mapping[1] = 1;
      dec->n_streams = 1;
      dec->n_stereo_streams = 1;

      if (!gst_opus_dec_negotiate (dec, NULL))
        return GST_FLOW_NOT_NEGOTIATED;
    }

    if (dec->n_channels == 2 && dec->n_streams == 1
        && dec->n_stereo_streams == 0) {
      /* if we are automatically decoding 2 channels, but only have
         a single encoded one, direct both channels to it */
      dec->channel_mapping[1] = 0;
    }

    GST_DEBUG_OBJECT (dec, "Creating decoder with %d channels, %d Hz",
        dec->n_channels, dec->sample_rate);
#ifndef GST_DISABLE_GST_DEBUG
    gst_opus_common_log_channel_mapping_table (GST_ELEMENT (dec), opusdec_debug,
        "Mapping table", dec->n_channels, dec->channel_mapping);
#endif

    GST_DEBUG_OBJECT (dec, "%d streams, %d stereo", dec->n_streams,
        dec->n_stereo_streams);
    dec->state =
        opus_multistream_decoder_create (dec->sample_rate, dec->n_channels,
        dec->n_streams, dec->n_stereo_streams, dec->channel_mapping, &err);
    if (!dec->state || err != OPUS_OK)
      goto creation_failed;
  }

  if (buffer) {
    GST_DEBUG_OBJECT (dec, "Received buffer of size %" G_GSIZE_FORMAT,
        gst_buffer_get_size (buffer));
  } else {
    GST_DEBUG_OBJECT (dec, "Received missing buffer");
  }

  /* if using in-band FEC, we introduce one extra frame's delay as we need
     to potentially wait for next buffer to decode a missing buffer */
  if (dec->use_inband_fec && !dec->primed) {
    GST_DEBUG_OBJECT (dec, "First buffer received in FEC mode, early out");
    gst_buffer_replace (&dec->last_buffer, buffer);
    dec->primed = TRUE;
    goto done;
  }

  /* That's the buffer we'll be sending to the opus decoder. */
  buf = (dec->use_inband_fec
      && gst_buffer_get_size (dec->last_buffer) > 0) ?
      dec->last_buffer : buffer;
  /* That's the buffer we get duration from */
  bufd = dec->use_inband_fec ? dec->last_buffer : buffer;

  if (buf && gst_buffer_get_size (buf) > 0) {
    gst_buffer_map (buf, &map, GST_MAP_READ);
    data = map.data;
    size = map.size;
    GST_DEBUG_OBJECT (dec, "Using buffer of size %" G_GSIZE_FORMAT, size);
  } else {
    /* concealment data, pass NULL as the bits parameters */
    GST_DEBUG_OBJECT (dec, "Using NULL buffer");
    data = NULL;
    size = 0;
  }

  if (gst_buffer_get_size (bufd) == 0) {
    /* missing buffer: figure out how much audio to conceal */
    GstClockTime const opus_plc_alignment = 2500 * GST_USECOND;
    GstClockTime aligned_missing_duration;
    GstClockTime missing_duration = GST_BUFFER_DURATION (bufd);

    if (!GST_CLOCK_TIME_IS_VALID (missing_duration) || missing_duration == 0) {
      if (GST_CLOCK_TIME_IS_VALID (dec->last_known_buffer_duration)) {
        missing_duration = dec->last_known_buffer_duration;
        GST_WARNING_OBJECT (dec,
            "Missing duration, using last duration %" GST_TIME_FORMAT,
            GST_TIME_ARGS (missing_duration));
      } else {
        GST_WARNING_OBJECT (dec,
            "Missing buffer, but unknown duration, and no previously known duration, assuming 20 ms");
        missing_duration = 20 * GST_MSECOND;
      }
    }

    GST_DEBUG_OBJECT (dec,
        "missing buffer, doing PLC duration %" GST_TIME_FORMAT
        " plus leftover %" GST_TIME_FORMAT, GST_TIME_ARGS (missing_duration),
        GST_TIME_ARGS (dec->leftover_plc_duration));

    /* add the leftover PLC duration to that of the buffer */
    missing_duration += dec->leftover_plc_duration;

    /* align the combined buffer and leftover PLC duration to multiples
     * of 2.5ms, rounding to nearest, and store excess duration for later */
    aligned_missing_duration =
        ((missing_duration + opus_plc_alignment / 2) /
        opus_plc_alignment) * opus_plc_alignment;
    dec->leftover_plc_duration = missing_duration - aligned_missing_duration;

    /* Opus' PLC cannot operate with less than 2.5ms; skip PLC
     * and accumulate the missing duration in the leftover_plc_duration
     * for the next PLC attempt */
    if (aligned_missing_duration < opus_plc_alignment) {
      GST_DEBUG_OBJECT (dec,
          "current duration %" GST_TIME_FORMAT
          " of missing data not enough for PLC (minimum needed: %"
          GST_TIME_FORMAT ") - skipping", GST_TIME_ARGS (missing_duration),
          GST_TIME_ARGS (opus_plc_alignment));
      goto done;
    }

    /* convert the duration (in nanoseconds) to sample count */
    samples =
        gst_util_uint64_scale_int (aligned_missing_duration, dec->sample_rate,
        GST_SECOND);
    GST_DEBUG_OBJECT (dec,
        "calculated PLC frame length: %" GST_TIME_FORMAT
        " num frame samples: %d new leftover: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (aligned_missing_duration), samples,
        GST_TIME_ARGS (dec->leftover_plc_duration));
  } else {
    /* use maximum size (120 ms) as the number of returned samples is
       not constant over the stream. */
    samples = 120 * dec->sample_rate / 1000;
  }

  packet_size = samples * dec->n_channels * 2;  /* 2 bytes per S16 sample */

  outbuf =
      gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (dec),
      packet_size);
  if (!outbuf) {
    goto buffer_failed;
  }

  /* remember this packet's duration for a later PLC fallback */
  if (size > 0)
    dec->last_known_buffer_duration = packet_duration_opus (data, size);

  gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
  out_data = (gint16 *) omap.data;

  /* decode, retrying with a bigger output buffer while the decoder reports
   * OPUS_BUFFER_TOO_SMALL (up to the 120 ms Opus maximum) */
  do {
    if (dec->use_inband_fec) {
      if (gst_buffer_get_size (dec->last_buffer) > 0) {
        /* normal delayed decode */
        GST_LOG_OBJECT (dec, "FEC enabled, decoding last delayed buffer");
        n = opus_multistream_decode (dec->state, data, size, out_data, samples,
            0);
      } else {
        /* FEC reconstruction decode */
        GST_LOG_OBJECT (dec, "FEC enabled, reconstructing last buffer");
        n = opus_multistream_decode (dec->state, data, size, out_data, samples,
            1);
      }
    } else {
      /* normal decode */
      GST_LOG_OBJECT (dec, "FEC disabled, decoding buffer");
      n = opus_multistream_decode (dec->state, data, size, out_data, samples,
          0);
    }
    if (n == OPUS_BUFFER_TOO_SMALL) {
      /* if too small, add 2.5 milliseconds and try again, up to the
       * Opus max size of 120 milliseconds */
      if (samples >= 120 * dec->sample_rate / 1000)
        break;
      samples += 25 * dec->sample_rate / 10000;
      packet_size = samples * dec->n_channels * 2;
      gst_buffer_unmap (outbuf, &omap);
      gst_buffer_unref (outbuf);
      outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (dec),
          packet_size);
      if (!outbuf) {
        goto buffer_failed;
      }
      gst_buffer_map (outbuf, &omap, GST_MAP_WRITE);
      out_data = (gint16 *) omap.data;
    }
  } while (n == OPUS_BUFFER_TOO_SMALL);
  gst_buffer_unmap (outbuf, &omap);
  if (data != NULL)
    gst_buffer_unmap (buf, &map);

  if (n < 0) {
    GstFlowReturn ret = GST_FLOW_ERROR;

    gst_buffer_unref (outbuf);
    GST_AUDIO_DECODER_ERROR (dec, 1, STREAM, DECODE, (NULL),
        ("Decoding error (%d): %s", n, opus_strerror (n)), ret);
    return ret;
  }
  GST_DEBUG_OBJECT (dec, "decoded %d samples", n);
  /* shrink the output to the samples actually produced */
  gst_buffer_set_size (outbuf, n * 2 * dec->n_channels);

  GST_BUFFER_DURATION (outbuf) = samples * GST_SECOND / dec->sample_rate;

  samples = n;

  cmeta = gst_buffer_get_audio_clipping_meta (buf);

  g_assert (!cmeta || cmeta->format == GST_FORMAT_DEFAULT);

  /* Skip any samples that need skipping (clipping meta counts at 48 kHz) */
  if (cmeta && cmeta->start) {
    guint pre_skip = cmeta->start;
    guint scaled_pre_skip = pre_skip * dec->sample_rate / 48000;
    guint skip = scaled_pre_skip > n ? n : scaled_pre_skip;
    guint scaled_skip = skip * 48000 / dec->sample_rate;

    gst_buffer_resize (outbuf, skip * 2 * dec->n_channels, -1);

    GST_INFO_OBJECT (dec,
        "Skipping %u samples at the beginning (%u at 48000 Hz)",
        skip, scaled_skip);
  }

  if (cmeta && cmeta->end) {
    guint post_skip = cmeta->end;
    guint scaled_post_skip = post_skip * dec->sample_rate / 48000;
    guint skip = scaled_post_skip > n ? n : scaled_post_skip;
    guint scaled_skip = skip * 48000 / dec->sample_rate;
    guint outsize = gst_buffer_get_size (outbuf);
    guint skip_bytes = skip * 2 * dec->n_channels;

    /* clamp rather than underflow the unsigned size */
    if (outsize > skip_bytes)
      outsize -= skip_bytes;
    else
      outsize = 0;

    gst_buffer_resize (outbuf, 0, outsize);

    GST_INFO_OBJECT (dec,
        "Skipping %u samples at the end (%u at 48000 Hz)", skip, scaled_skip);
  }

  if (gst_buffer_get_size (outbuf) == 0) {
    /* everything was skipped: pass NULL downstream to consume the frame */
    gst_buffer_unref (outbuf);
    outbuf = NULL;
  } else if (dec->opus_pos[0] != GST_AUDIO_CHANNEL_POSITION_INVALID) {
    gst_audio_buffer_reorder_channels (outbuf, GST_AUDIO_FORMAT_S16,
        dec->n_channels, dec->opus_pos, dec->info.position);
  }

  /* Apply gain */
  /* Would be better off leaving this to a volume element, as this is
     a naive conversion that does too many int/float conversions.
     However, we don't have control over the pipeline...
     So make it optional if the user program wants to use a volume, but
     do it by default so the correct volume goes out by default */
  if (dec->apply_gain && outbuf && dec->r128_gain) {
    gsize rsize;
    unsigned int i, nsamples;
    double volume = dec->r128_gain_volume;
    gint16 *samples;
    gst_buffer_map (outbuf, &omap, GST_MAP_READWRITE);
    samples = (gint16 *) omap.data;
    rsize = omap.size;
    GST_DEBUG_OBJECT (dec, "Applying gain: volume %f", volume);
    nsamples = rsize / 2;
    for (i = 0; i < nsamples; ++i) {
      /* clamp to the S16 range after scaling */
      int sample = (int) (samples[i] * volume + 0.5);
      samples[i] = sample < -32768 ? -32768 : sample > 32767 ? 32767 : sample;
    }
    gst_buffer_unmap (outbuf, &omap);
  }

  if (dec->use_inband_fec) {
    /* remember this buffer for the delayed decode on the next call */
    gst_buffer_replace (&dec->last_buffer, buffer);
  }

  res = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (dec), outbuf, 1);

  if (res != GST_FLOW_OK)
    GST_DEBUG_OBJECT (dec, "flow: %s", gst_flow_get_name (res));

done:
  return res;

/* ERRORS */
creation_failed:
  GST_ELEMENT_ERROR (dec, LIBRARY, INIT, ("Failed to create Opus decoder"),
      ("Failed to create Opus decoder (%d): %s", err, opus_strerror (err)));
  return GST_FLOW_ERROR;

buffer_failed:
  GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
      ("Failed to create %u byte buffer", packet_size));
  return GST_FLOW_ERROR;
}
/* Srcpad task function for the Android MediaCodec audio decoder.
 *
 * Repeatedly dequeues decoded output buffers from the codec, copies them
 * into freshly allocated GstBuffers (applying a channel reorder map when
 * needed) and pushes them downstream via gst_audio_decoder_finish_frame().
 * Also reacts to the codec's "output buffers changed" / "output format
 * changed" notifications and to EOS/draining.
 *
 * Runs with the audio decoder stream lock held, except while blocking on
 * the codec dequeue call.  On any error it pushes EOS, pauses the task and
 * records the flow return in self->downstream_flow_ret.
 *
 * FIX (review): the invalid_buffer_index error previously reported an
 * "input" buffer index against self->n_input_buffers, but `idx` is an
 * OUTPUT buffer index (validated against self->n_output_buffers above);
 * the message and field are corrected below. */
static void
gst_amc_audio_dec_loop (GstAmcAudioDec * self)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  gboolean is_eos;
  GstAmcBufferInfo buffer_info;
  gint idx;

  GST_AUDIO_DECODER_STREAM_LOCK (self);

retry:
  /*if (self->input_caps_changed) {
     idx = INFO_OUTPUT_FORMAT_CHANGED;
     } else { */
  GST_DEBUG_OBJECT (self, "Waiting for available output buffer");
  GST_AUDIO_DECODER_STREAM_UNLOCK (self);
  /* Wait at most 100ms here, some codecs don't fail dequeueing if
   * the codec is flushing, causing deadlocks during shutdown */
  idx = gst_amc_codec_dequeue_output_buffer (self->codec, &buffer_info,
      100000);
  GST_AUDIO_DECODER_STREAM_LOCK (self);
  /*} */

  if (idx < 0) {
    if (self->flushing)
      goto flushing;

    /* Negative index is a status code, not a buffer */
    switch (idx) {
      case INFO_OUTPUT_BUFFERS_CHANGED:{
        GST_DEBUG_OBJECT (self, "Output buffers have changed");
        if (self->output_buffers)
          gst_amc_codec_free_buffers (self->output_buffers,
              self->n_output_buffers);
        self->output_buffers =
            gst_amc_codec_get_output_buffers (self->codec,
            &self->n_output_buffers);
        if (!self->output_buffers)
          goto get_output_buffers_error;
        break;
      }
      case INFO_OUTPUT_FORMAT_CHANGED:{
        GstAmcFormat *format;
        gchar *format_string;

        GST_DEBUG_OBJECT (self, "Output format has changed");

        format = gst_amc_codec_get_output_format (self->codec);
        if (!format)
          goto format_error;

        format_string = gst_amc_format_to_string (format);
        GST_DEBUG_OBJECT (self, "Got new output format: %s", format_string);
        g_free (format_string);

        if (!gst_amc_audio_dec_set_src_caps (self, format)) {
          gst_amc_format_free (format);
          goto format_error;
        }
        gst_amc_format_free (format);

        /* Format change invalidates the buffer array as well */
        if (self->output_buffers)
          gst_amc_codec_free_buffers (self->output_buffers,
              self->n_output_buffers);
        self->output_buffers =
            gst_amc_codec_get_output_buffers (self->codec,
            &self->n_output_buffers);
        if (!self->output_buffers)
          goto get_output_buffers_error;

        goto retry;
        break;
      }
      case INFO_TRY_AGAIN_LATER:
        GST_DEBUG_OBJECT (self, "Dequeueing output buffer timed out");
        goto retry;
        break;
      case G_MININT:
        GST_ERROR_OBJECT (self, "Failure dequeueing output buffer");
        goto dequeue_error;
        break;
      default:
        g_assert_not_reached ();
        break;
    }

    goto retry;
  }

  GST_DEBUG_OBJECT (self,
      "Got output buffer at index %d: size %d time %" G_GINT64_FORMAT
      " flags 0x%08x", idx, buffer_info.size, buffer_info.presentation_time_us,
      buffer_info.flags);

  is_eos = ! !(buffer_info.flags & BUFFER_FLAG_END_OF_STREAM);
  self->n_buffers++;

  if (buffer_info.size > 0) {
    GstAmcAudioDecClass *klass = GST_AMC_AUDIO_DEC_GET_CLASS (self);
    GstBuffer *outbuf;
    GstAmcBuffer *buf;
    GstMapInfo minfo;

    /* This sometimes happens at EOS or if the input is not properly framed,
     * let's handle it gracefully by allocating a new buffer for the current
     * caps and filling it */
    if (idx >= self->n_output_buffers)
      goto invalid_buffer_index;

    if (strcmp (klass->codec_info->name, "OMX.google.mp3.decoder") == 0) {
      /* Google's MP3 decoder outputs garbage in the first output buffer
       * so we just drop it here */
      if (self->n_buffers == 1) {
        GST_DEBUG_OBJECT (self,
            "Skipping first buffer of Google MP3 decoder output");
        goto done;
      }
    }

    outbuf =
        gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (self),
        buffer_info.size);
    if (!outbuf)
      goto failed_allocate;

    gst_buffer_map (outbuf, &minfo, GST_MAP_WRITE);
    buf = &self->output_buffers[idx];
    if (self->needs_reorder) {
      /* Interleaved S16 samples: move each channel into the position
       * given by self->reorder_map */
      gint i, n_samples, c, n_channels;
      gint *reorder_map = self->reorder_map;
      gint16 *dest, *source;

      dest = (gint16 *) minfo.data;
      source = (gint16 *) (buf->data + buffer_info.offset);
      n_samples = buffer_info.size / self->info.bpf;
      n_channels = self->info.channels;

      for (i = 0; i < n_samples; i++) {
        for (c = 0; c < n_channels; c++) {
          dest[i * n_channels + reorder_map[c]] = source[i * n_channels + c];
        }
      }
    } else {
      orc_memcpy (minfo.data, buf->data + buffer_info.offset,
          buffer_info.size);
    }
    gst_buffer_unmap (outbuf, &minfo);

    /* FIXME: We should get one decoded input frame here for
     * every buffer. If this is not the case somewhere, we will
     * error out at some point and will need to add workarounds */
    flow_ret =
        gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (self), outbuf, 1);
  }

done:
  /* Always give the buffer back to the codec, even when we skipped it */
  if (!gst_amc_codec_release_output_buffer (self->codec, idx))
    goto failed_release;

  if (is_eos || flow_ret == GST_FLOW_EOS) {
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    g_mutex_lock (&self->drain_lock);
    if (self->draining) {
      GST_DEBUG_OBJECT (self, "Drained");
      self->draining = FALSE;
      g_cond_broadcast (&self->drain_cond);
    } else if (flow_ret == GST_FLOW_OK) {
      GST_DEBUG_OBJECT (self, "Component signalled EOS");
      flow_ret = GST_FLOW_EOS;
    }
    g_mutex_unlock (&self->drain_lock);
    GST_AUDIO_DECODER_STREAM_LOCK (self);
  } else {
    GST_DEBUG_OBJECT (self, "Finished frame: %s",
        gst_flow_get_name (flow_ret));
  }

  self->downstream_flow_ret = flow_ret;

  if (flow_ret != GST_FLOW_OK)
    goto flow_error;

  GST_AUDIO_DECODER_STREAM_UNLOCK (self);

  return;

dequeue_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("Failed to dequeue output buffer"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

get_output_buffers_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("Failed to get output buffers"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

format_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("Failed to handle format"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

failed_release:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("Failed to release output buffer index %d", idx));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_FLUSHING;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

flow_error:
  {
    if (flow_ret == GST_FLOW_EOS) {
      GST_DEBUG_OBJECT (self, "EOS");
      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    } else if (flow_ret == GST_FLOW_NOT_LINKED || flow_ret < GST_FLOW_EOS) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED,
          ("Internal data stream error."), ("stream stopped, reason %s",
              gst_flow_get_name (flow_ret)));
      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    }
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

invalid_buffer_index:
  {
    /* idx is an output buffer index (checked against n_output_buffers
     * above); previously this wrongly reported input buffer counts */
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("Invalid output buffer index %d of %d", idx,
            (gint) self->n_output_buffers));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

failed_allocate:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to allocate output buffer"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }
}
/* Decode one framed Siren7 input buffer.
 *
 * Input must be a whole number of 40-byte Siren frames; each frame expands
 * to 640 bytes of PCM.  All frames are decoded into a single output buffer
 * which is finished as one logical frame.
 *
 * Returns GST_FLOW_ERROR on malformed input sizes or decode failure (via
 * GST_AUDIO_DECODER_ERROR, which may also elect to continue).
 *
 * FIX (review): the decode_error path previously returned without
 * unmapping `buf`/`out_buf`, leaking both mappings; the unmaps are now
 * performed before the output buffer is dropped. */
static GstFlowReturn
gst_siren_dec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buf)
{
  GstSirenDec *dec;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *out_buf;
  guint8 *in_data, *out_data;
  guint i, size, num_frames;
  gint out_size, in_size;
  gint decode_ret;
  GstMapInfo inmap, outmap;

  dec = GST_SIREN_DEC (bdec);

  size = gst_buffer_get_size (buf);

  GST_LOG_OBJECT (dec, "Received buffer of size %u", size);

  /* framed input: must be a positive multiple of the 40-byte frame size */
  g_return_val_if_fail (size % 40 == 0, GST_FLOW_ERROR);
  g_return_val_if_fail (size > 0, GST_FLOW_ERROR);

  /* process 40 input bytes into 640 output bytes */
  num_frames = size / 40;

  /* this is the input/output size */
  in_size = num_frames * 40;
  out_size = num_frames * 640;

  GST_LOG_OBJECT (dec, "we have %u frames, %u in, %u out", num_frames,
      in_size, out_size);

  out_buf = gst_audio_decoder_allocate_output_buffer (bdec, out_size);
  if (out_buf == NULL)
    goto alloc_failed;

  /* get the input data for all the frames */
  gst_buffer_map (buf, &inmap, GST_MAP_READ);
  gst_buffer_map (out_buf, &outmap, GST_MAP_WRITE);

  in_data = inmap.data;
  out_data = outmap.data;

  for (i = 0; i < num_frames; i++) {
    GST_LOG_OBJECT (dec, "Decoding frame %u/%u", i, num_frames);

    /* decode 40 input bytes to 640 output bytes */
    decode_ret = Siren7_DecodeFrame (dec->decoder, in_data, out_data);
    if (decode_ret != 0)
      goto decode_error;

    /* move to next frame */
    out_data += 640;
    in_data += 40;
  }

  gst_buffer_unmap (buf, &inmap);
  gst_buffer_unmap (out_buf, &outmap);

  GST_LOG_OBJECT (dec, "Finished decoding");

  /* might really be multiple frames,
   * but was treated as one for all purposes here */
  ret = gst_audio_decoder_finish_frame (bdec, out_buf, 1);

done:
  return ret;

  /* ERRORS */
alloc_failed:
  {
    GST_DEBUG_OBJECT (dec, "failed to pad_alloc buffer: %d (%s)", ret,
        gst_flow_get_name (ret));
    goto done;
  }
decode_error:
  {
    GST_AUDIO_DECODER_ERROR (bdec, 1, STREAM, DECODE, (NULL),
        ("Error decoding frame: %d", decode_ret), ret);
    /* both buffers are still mapped at this point -- unmap them before
     * dropping the output buffer (previously leaked) */
    gst_buffer_unmap (buf, &inmap);
    gst_buffer_unmap (out_buf, &outmap);
    if (ret == GST_FLOW_OK)
      gst_audio_decoder_finish_frame (bdec, NULL, 1);
    gst_buffer_unref (out_buf);
    goto done;
  }
}
/* Decode one packet of `size` bytes at `data` with the ffmpeg context and
 * produce a GstBuffer in *outbuf (NULL if no frame was produced).
 *
 * Returns the number of bytes consumed by avcodec_decode_audio4(), or -1
 * on decode/negotiation failure.  On negotiation failure *ret is set to
 * GST_FLOW_NOT_NEGOTIATED; otherwise *ret is left untouched.
 *
 * NOTE(review): in_plugin is unused here -- presumably kept for signature
 * parity with the video path; confirm before removing. */
static gint
gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec,
    AVCodec * in_plugin, guint8 * data, guint size,
    GstBuffer ** outbuf, GstFlowReturn * ret)
{
  gint len = -1;
  /* NOTE(review): avcodec_decode_audio4() treats have_data as a
   * got-frame flag; the AVCODEC_MAX_AUDIO_FRAME_SIZE initializer looks
   * like a leftover from the older decode API -- confirm */
  gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE;
  AVPacket packet;
  AVFrame frame;

  GST_DEBUG_OBJECT (ffmpegdec, "size: %d", size);

  gst_avpacket_init (&packet, data, size);
  memset (&frame, 0, sizeof (frame));
  avcodec_get_frame_defaults (&frame);
  len = avcodec_decode_audio4 (ffmpegdec->context, &frame, &have_data,
      &packet);

  GST_DEBUG_OBJECT (ffmpegdec, "Decode audio: len=%d, have_data=%d", len,
      have_data);

  if (len >= 0 && have_data > 0) {
    /* frame.opaque is set by the custom get_buffer path when the decoder
     * wrote directly into a GstBuffer we provided */
    BufferInfo *buffer_info = frame.opaque;
    gint nsamples, channels, byte_per_sample;
    gsize output_size;

    /* output format may only be known after the first decoded frame */
    if (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)) {
      *outbuf = NULL;
      *ret = GST_FLOW_NOT_NEGOTIATED;
      len = -1;
      goto beach;
    }

    channels = ffmpegdec->info.channels;

    nsamples = frame.nb_samples;
    byte_per_sample = ffmpegdec->info.finfo->width / 8;

    /* frame.linesize[0] might contain padding, allocate only what's needed */
    output_size = nsamples * byte_per_sample * channels;

    GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
    if (buffer_info) {
      /* decoder wrote into our buffer already: take it over */
      *outbuf = buffer_info->buffer;
      gst_buffer_unmap (buffer_info->buffer, &buffer_info->map);
      g_slice_free (BufferInfo, buffer_info);
      frame.opaque = NULL;
    } else if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt)
        && channels > 1) {
      /* planar ffmpeg output: interleave one sample per channel at a
       * time into the GstBuffer, at the negotiated sample width */
      gint i, j;
      GstMapInfo minfo;

      /* note: linesize[0] might contain padding, allocate only what's
       * needed */
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);

      gst_buffer_map (*outbuf, &minfo, GST_MAP_WRITE);
      switch (ffmpegdec->info.finfo->width) {
        case 8:{
          guint8 *odata = minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint8 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 16:{
          guint16 *odata = (guint16 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint16 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 32:{
          guint32 *odata = (guint32 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint32 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 64:{
          guint64 *odata = (guint64 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint64 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }
      gst_buffer_unmap (*outbuf, &minfo);
    } else {
      /* packed (or mono) output: a single memcpy suffices */
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);
      gst_buffer_fill (*outbuf, 0, frame.data[0], output_size);
    }

    /* NOTE(review): have_data is a got-frame flag here, not a byte size;
     * this log message looks misleading -- confirm */
    GST_DEBUG_OBJECT (ffmpegdec, "Buffer created. Size: %d", have_data);

    /* Reorder channels to the GStreamer channel order */
    if (ffmpegdec->needs_reorder) {
      *outbuf = gst_buffer_make_writable (*outbuf);
      gst_audio_buffer_reorder_channels (*outbuf,
          ffmpegdec->info.finfo->format, ffmpegdec->info.channels,
          ffmpegdec->ffmpeg_layout, ffmpegdec->info.position);
    }
  } else {
    *outbuf = NULL;
  }

beach:
  GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
      *ret, *outbuf, len);
  return len;
}
/* Decode one packet of `size` bytes at `data` into ffmpegdec->frame and
 * produce a GstBuffer in *outbuf (NULL if no frame was produced).
 *
 * *have_data receives avcodec_decode_audio4()'s got-frame flag.  Returns
 * the number of bytes consumed, or -1 on decode/negotiation failure; on
 * negotiation failure *ret is set to GST_FLOW_NOT_NEGOTIATED.  The AVFrame
 * is always unreffed before returning.
 *
 * NOTE(review): in_plugin is unused here -- presumably kept for signature
 * parity; confirm before removing. */
static gint
gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec,
    AVCodec * in_plugin, guint8 * data, guint size,
    gint * have_data, GstBuffer ** outbuf, GstFlowReturn * ret)
{
  gint len = -1;
  AVPacket packet;

  GST_DEBUG_OBJECT (ffmpegdec, "size: %d", size);

  gst_avpacket_init (&packet, data, size);
  len = avcodec_decode_audio4 (ffmpegdec->context, ffmpegdec->frame,
      have_data, &packet);

  GST_DEBUG_OBJECT (ffmpegdec, "Decode audio: len=%d, have_data=%d",
      len, *have_data);

  if (len >= 0 && *have_data) {
    gint nsamples, channels, byte_per_sample;
    gsize output_size;

    /* output format may only be known after the first decoded frame */
    if (!gst_ffmpegauddec_negotiate (ffmpegdec, ffmpegdec->context,
            ffmpegdec->frame, FALSE)) {
      *outbuf = NULL;
      *ret = GST_FLOW_NOT_NEGOTIATED;
      len = -1;
      goto beach;
    }

    channels = ffmpegdec->info.channels;

    nsamples = ffmpegdec->frame->nb_samples;
    byte_per_sample = ffmpegdec->info.finfo->width / 8;

    /* ffmpegdec->frame->linesize[0] might contain padding, allocate only
     * what's needed */
    output_size = nsamples * byte_per_sample * channels;

    GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
    if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt)
        && channels > 1) {
      /* planar ffmpeg output: interleave one sample per channel at a
       * time into the GstBuffer, at the negotiated sample width */
      gint i, j;
      GstMapInfo minfo;

      /* note: linesize[0] might contain padding, allocate only what's
       * needed */
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);

      gst_buffer_map (*outbuf, &minfo, GST_MAP_WRITE);
      switch (ffmpegdec->info.finfo->width) {
        case 8:{
          guint8 *odata = minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] =
                  ((const guint8 *) ffmpegdec->frame->extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 16:{
          guint16 *odata = (guint16 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] =
                  ((const guint16 *) ffmpegdec->frame->extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 32:{
          guint32 *odata = (guint32 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] =
                  ((const guint32 *) ffmpegdec->frame->extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 64:{
          guint64 *odata = (guint64 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] =
                  ((const guint64 *) ffmpegdec->frame->extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }
      gst_buffer_unmap (*outbuf, &minfo);
    } else {
      /* packed (or mono) output: a single copy suffices */
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);
      gst_buffer_fill (*outbuf, 0, ffmpegdec->frame->data[0], output_size);
    }

    GST_DEBUG_OBJECT (ffmpegdec, "Buffer created. Size: %" G_GSIZE_FORMAT,
        output_size);

    /* Reorder channels to the GStreamer channel order */
    if (ffmpegdec->needs_reorder) {
      *outbuf = gst_buffer_make_writable (*outbuf);
      gst_audio_buffer_reorder_channels (*outbuf,
          ffmpegdec->info.finfo->format, ffmpegdec->info.channels,
          ffmpegdec->ffmpeg_layout, ffmpegdec->info.position);
    }

    /* Mark corrupted frames as corrupted */
    if (ffmpegdec->frame->flags & AV_FRAME_FLAG_CORRUPT)
      GST_BUFFER_FLAG_SET (*outbuf, GST_BUFFER_FLAG_CORRUPTED);
  } else {
    *outbuf = NULL;
  }

beach:
  /* drop the decoder's reference to the frame data in all cases */
  av_frame_unref (ffmpegdec->frame);
  GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
      *ret, *outbuf, len);
  return len;
}
/* Decode a buffer containing one or more equally-sized SBC frames.
 *
 * Each input frame of dec->frame_len bytes decodes to
 * dec->samples_per_frame 16-bit samples.  On a mid-buffer decode error the
 * output is truncated to the frames decoded so far (or dropped entirely if
 * the very first frame failed).  NULL input (drain) and empty/misaligned
 * input are handled by finishing a NULL frame. */
static GstFlowReturn
gst_sbc_dec_handle_frame (GstAudioDecoder * audio_dec, GstBuffer * buf)
{
  GstSbcDec *dec = GST_SBC_DEC (audio_dec);
  GstBuffer *outbuf = NULL;
  GstMapInfo out_map;
  GstMapInfo in_map;
  gsize output_size;
  guint num_frames, i;

  /* no fancy draining */
  if (G_UNLIKELY (buf == NULL))
    return GST_FLOW_OK;

  /* frame_len is only known once caps were negotiated */
  if (G_UNLIKELY (dec->frame_len == 0))
    return GST_FLOW_NOT_NEGOTIATED;

  gst_buffer_map (buf, &in_map, GST_MAP_READ);

  if (G_UNLIKELY (in_map.size == 0))
    goto done;

  /* we assume all frames are of the same size, this is implied by the
   * input caps applying to the whole input buffer, and the parser should
   * also have made sure of that */
  if (G_UNLIKELY (in_map.size % dec->frame_len != 0))
    goto mixed_frames;

  num_frames = in_map.size / dec->frame_len;
  output_size = num_frames * dec->samples_per_frame * sizeof (gint16);

  outbuf = gst_audio_decoder_allocate_output_buffer (audio_dec, output_size);

  if (outbuf == NULL)
    goto no_buffer;

  gst_buffer_map (outbuf, &out_map, GST_MAP_WRITE);

  /* decode frame by frame; `2` below is sizeof (gint16) per sample */
  for (i = 0; i < num_frames; ++i) {
    gssize ret;
    gsize written;

    ret = sbc_decode (&dec->sbc, in_map.data + (i * dec->frame_len),
        dec->frame_len, out_map.data + (i * dec->samples_per_frame * 2),
        dec->samples_per_frame * 2, &written);

    if (ret <= 0 || written != (dec->samples_per_frame * 2)) {
      /* stop at the first bad frame; everything before it is kept */
      GST_WARNING_OBJECT (dec, "decoding error, ret = %" G_GSSIZE_FORMAT ", "
          "written = %" G_GSSIZE_FORMAT, ret, written);
      break;
    }
  }

  gst_buffer_unmap (outbuf, &out_map);

  /* truncate to what actually decoded; drop the buffer if nothing did */
  if (i > 0)
    gst_buffer_set_size (outbuf, i * dec->samples_per_frame * 2);
  else
    gst_buffer_replace (&outbuf, NULL);

done:
  gst_buffer_unmap (buf, &in_map);

  /* outbuf may be NULL here, which finishes the frame without output */
  return gst_audio_decoder_finish_frame (audio_dec, outbuf, 1);

/* ERRORS */
mixed_frames:
  {
    GST_WARNING_OBJECT (dec, "inconsistent input data/frames, skipping");
    goto done;
  }
no_buffer:
  {
    GST_ERROR_OBJECT (dec, "could not allocate output buffer");
    goto done;
  }
}
/* DroidMedia callback: a decoded PCM chunk is available.
 *
 * Copies the chunk into a GstBuffer and either pushes it directly
 * (fast path, when it is exactly spf samples and the adapter is empty)
 * or accumulates it in the adapter until spf samples are available.
 * On the first chunk the output format is queried from the codec and
 * negotiated as S16.
 *
 * Takes the audio decoder stream lock for the whole callback and records
 * the resulting flow return in dec->downstream_flow_ret.
 *
 * FIX (review): gsize/size_t values were previously logged with %d
 * (format/argument mismatch on LP64); they now use G_GSIZE_FORMAT.  The
 * inner GstAudioInfo local was also renamed, as it shadowed the outer
 * GstMapInfo `info`. */
static void
gst_droidadec_data_available (void *data, DroidMediaCodecData * encoded)
{
  GstFlowReturn flow_ret;
  GstDroidADec *dec = (GstDroidADec *) data;
  GstAudioDecoder *decoder = GST_AUDIO_DECODER (dec);
  GstBuffer *out;
  GstMapInfo info;

  GST_DEBUG_OBJECT (dec, "data available of size %" G_GSIZE_FORMAT,
      (gsize) encoded->data.size);

  GST_AUDIO_DECODER_STREAM_LOCK (decoder);

  if (G_UNLIKELY (dec->downstream_flow_ret != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "not handling data in error state: %s",
        gst_flow_get_name (dec->downstream_flow_ret));
    flow_ret = dec->downstream_flow_ret;
    gst_audio_decoder_finish_frame (decoder, NULL, 1);
    goto out;
  }

  if (G_UNLIKELY (gst_audio_decoder_get_audio_info (GST_AUDIO_DECODER
              (dec))->finfo->format == GST_AUDIO_FORMAT_UNKNOWN)) {
    /* first decoded data: query the codec for rate/channels and
     * negotiate S16 output */
    DroidMediaCodecMetaData md;
    DroidMediaRect crop;        /* TODO: get rid of that */
    GstAudioInfo audio_info;

    memset (&md, 0x0, sizeof (md));
    droid_media_codec_get_output_info (dec->codec, &md, &crop);
    GST_INFO_OBJECT (dec, "output rate=%d, output channels=%d",
        md.sample_rate, md.channels);

    gst_audio_info_init (&audio_info);
    gst_audio_info_set_format (&audio_info, GST_AUDIO_FORMAT_S16,
        md.sample_rate, md.channels, NULL);

    if (!gst_audio_decoder_set_output_format (decoder, &audio_info)) {
      flow_ret = GST_FLOW_ERROR;
      goto out;
    }

    dec->info = gst_audio_decoder_get_audio_info (GST_AUDIO_DECODER (dec));
  }

  out = gst_audio_decoder_allocate_output_buffer (decoder,
      encoded->data.size);

  gst_buffer_map (out, &info, GST_MAP_READWRITE);
  orc_memcpy (info.data, encoded->data.data, encoded->data.size);
  gst_buffer_unmap (out, &info);

  // GST_WARNING_OBJECT (dec, "bpf %d, bps %d", dec->info->bpf, GST_AUDIO_INFO_BPS(dec->info));

  if (dec->spf == -1 || (encoded->data.size == dec->spf * dec->info->bpf
          && gst_adapter_available (dec->adapter) == 0)) {
    /* fast path. no need for anything */
    goto push;
  }

  /* adapter takes ownership of `out` */
  gst_adapter_push (dec->adapter, out);

  if (gst_adapter_available (dec->adapter) >= dec->spf * dec->info->bpf) {
    out = gst_adapter_take_buffer (dec->adapter, dec->spf * dec->info->bpf);
  } else {
    /* not enough samples yet; wait for the next chunk */
    flow_ret = GST_FLOW_OK;
    goto out;
  }

push:
  GST_DEBUG_OBJECT (dec, "pushing %" G_GSIZE_FORMAT " bytes out",
      gst_buffer_get_size (out));

  flow_ret = gst_audio_decoder_finish_frame (decoder, out, 1);

  if (flow_ret == GST_FLOW_OK || flow_ret == GST_FLOW_FLUSHING) {
    goto out;
  } else if (flow_ret == GST_FLOW_EOS) {
    GST_INFO_OBJECT (dec, "eos");
  } else if (flow_ret < GST_FLOW_OK) {
    GST_ELEMENT_ERROR (dec, STREAM, FAILED,
        ("Internal data stream error."),
        ("stream stopped, reason %s", gst_flow_get_name (flow_ret)));
  }

out:
  dec->downstream_flow_ret = flow_ret;
  GST_AUDIO_DECODER_STREAM_UNLOCK (decoder);
}
/* Srcpad task function for the OpenMAX audio decoder.
 *
 * Acquires one buffer from the output port and either (a) handles a port
 * reconfiguration / initial caps negotiation (querying PCM parameters,
 * building the channel layout and reorder map, renegotiating downstream,
 * re-enabling and repopulating the port), or (b) copies the decoded PCM
 * into a GstBuffer (reordering channels if needed), timestamps it from
 * the OMX tick clock and finishes it downstream.
 *
 * Error/EOS/flush conditions funnel into the labelled blocks at the end,
 * each of which pushes EOS and/or pauses the task and records the flow
 * return in self->downstream_flow_ret. */
static void
gst_omx_audio_dec_loop (GstOMXAudioDec * self)
{
  GstOMXPort *port = self->dec_out_port;
  GstOMXBuffer *buf = NULL;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstOMXAcquireBufferReturn acq_return;
  OMX_ERRORTYPE err;

  acq_return = gst_omx_port_acquire_buffer (port, &buf);
  if (acq_return == GST_OMX_ACQUIRE_BUFFER_ERROR) {
    goto component_error;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_FLUSHING) {
    goto flushing;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_EOS) {
    goto eos;
  }

  /* (re)negotiate when we have no caps yet or the port reconfigured */
  if (!gst_pad_has_current_caps (GST_AUDIO_DECODER_SRC_PAD (self))
      || acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
    OMX_PARAM_PORTDEFINITIONTYPE port_def;
    OMX_AUDIO_PARAM_PCMMODETYPE pcm_param;
    GstAudioChannelPosition omx_position[OMX_AUDIO_MAXCHANNELS];
    /* NOTE(review): this `klass` is scoped to this block, yet `klass` is
     * referenced again further down in the `if (!buf)` assert -- confirm
     * there is a function-scope declaration, otherwise that use cannot
     * compile */
    GstOMXAudioDecClass *klass = GST_OMX_AUDIO_DEC_GET_CLASS (self);
    gint i;

    GST_DEBUG_OBJECT (self, "Port settings have changed, updating caps");

    /* Reallocate all buffers */
    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE
        && gst_omx_port_is_enabled (port)) {
      err = gst_omx_port_set_enabled (port, FALSE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_buffers_released (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_deallocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 1 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    /* Just update caps */
    GST_AUDIO_DECODER_STREAM_LOCK (self);

    gst_omx_port_get_port_definition (port, &port_def);
    g_assert (port_def.format.audio.eEncoding == OMX_AUDIO_CodingPCM);

    GST_OMX_INIT_STRUCT (&pcm_param);
    pcm_param.nPortIndex = self->dec_out_port->index;
    err =
        gst_omx_component_get_parameter (self->dec, OMX_IndexParamAudioPcm,
        &pcm_param);
    if (err != OMX_ErrorNone) {
      GST_ERROR_OBJECT (self, "Failed to get PCM parameters: %s (0x%08x)",
          gst_omx_error_to_string (err), err);
      goto caps_failed;
    }

    /* only interleaved linear PCM output is supported here */
    g_assert (pcm_param.ePCMMode == OMX_AUDIO_PCMModeLinear);
    g_assert (pcm_param.bInterleaved == OMX_TRUE);

    gst_audio_info_init (&self->info);

    /* translate the OMX channel mapping to GStreamer positions; any
     * unknown/None channel forces the whole layout to NONE */
    for (i = 0; i < pcm_param.nChannels; i++) {
      switch (pcm_param.eChannelMapping[i]) {
        case OMX_AUDIO_ChannelLF:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT;
          break;
        case OMX_AUDIO_ChannelRF:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT;
          break;
        case OMX_AUDIO_ChannelCF:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER;
          break;
        case OMX_AUDIO_ChannelLS:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT;
          break;
        case OMX_AUDIO_ChannelRS:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT;
          break;
        case OMX_AUDIO_ChannelLFE:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_LFE1;
          break;
        case OMX_AUDIO_ChannelCS:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_REAR_CENTER;
          break;
        case OMX_AUDIO_ChannelLR:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_REAR_LEFT;
          break;
        case OMX_AUDIO_ChannelRR:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT;
          break;
        case OMX_AUDIO_ChannelNone:
        default:
          /* This will break the outer loop too as the
           * i == pcm_param.nChannels afterwards */
          for (i = 0; i < pcm_param.nChannels; i++)
            omx_position[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
          break;
      }
    }

    if (pcm_param.nChannels == 1
        && omx_position[0] == GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER)
      omx_position[0] = GST_AUDIO_CHANNEL_POSITION_MONO;

    /* subclass may provide a vendor-specific layout fallback */
    if (omx_position[0] == GST_AUDIO_CHANNEL_POSITION_NONE
        && klass->get_channel_positions) {
      GST_WARNING_OBJECT (self,
          "Failed to get a valid channel layout, trying fallback");
      klass->get_channel_positions (self, self->dec_out_port, omx_position);
    }

    /* GStreamer requires positions in its canonical order; remember a
     * reorder map if the OMX order differs */
    memcpy (self->position, omx_position, sizeof (omx_position));
    gst_audio_channel_positions_to_valid_order (self->position,
        pcm_param.nChannels);
    self->needs_reorder =
        (memcmp (self->position, omx_position,
            sizeof (GstAudioChannelPosition) * pcm_param.nChannels) != 0);
    if (self->needs_reorder)
      gst_audio_get_channel_reorder_map (pcm_param.nChannels, self->position,
          omx_position, self->reorder_map);

    gst_audio_info_set_format (&self->info,
        gst_audio_format_build_integer (pcm_param.eNumData ==
            OMX_NumericalDataSigned,
            pcm_param.eEndian ==
            OMX_EndianLittle ? G_LITTLE_ENDIAN : G_BIG_ENDIAN,
            pcm_param.nBitPerSample, pcm_param.nBitPerSample),
        pcm_param.nSamplingRate, pcm_param.nChannels, self->position);

    GST_DEBUG_OBJECT (self,
        "Setting output state: format %s, rate %u, channels %u",
        gst_audio_format_to_string (self->info.finfo->format),
        (guint) pcm_param.nSamplingRate, (guint) pcm_param.nChannels);

    if (!gst_audio_decoder_set_output_format (GST_AUDIO_DECODER (self),
            &self->info)
        || !gst_audio_decoder_negotiate (GST_AUDIO_DECODER (self))) {
      if (buf)
        gst_omx_port_release_buffer (port, buf);
      goto caps_failed;
    }

    GST_AUDIO_DECODER_STREAM_UNLOCK (self);

    /* bring the port back up with buffers for the new configuration */
    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
      err = gst_omx_port_set_enabled (port, TRUE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_allocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_populate (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_mark_reconfigured (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    /* Now get a buffer */
    if (acq_return != GST_OMX_ACQUIRE_BUFFER_OK) {
      return;
    }
  }

  g_assert (acq_return == GST_OMX_ACQUIRE_BUFFER_OK);
  if (!buf) {
    /* NOTE(review): `klass` here is only visible if a function-scope
     * declaration exists; the one above is block-scoped -- confirm */
    g_assert ((klass->cdata.hacks & GST_OMX_HACK_NO_EMPTY_EOS_BUFFER));
    GST_AUDIO_DECODER_STREAM_LOCK (self);
    goto eos;
  }

  /* This prevents a deadlock between the srcpad stream
   * lock and the audiocodec stream lock, if ::reset()
   * is called at the wrong time */
  if (gst_omx_port_is_flushing (port)) {
    GST_DEBUG_OBJECT (self, "Flushing");
    gst_omx_port_release_buffer (port, buf);
    goto flushing;
  }

  GST_DEBUG_OBJECT (self, "Handling buffer: 0x%08x %" G_GUINT64_FORMAT,
      (guint) buf->omx_buf->nFlags, (guint64) buf->omx_buf->nTimeStamp);

  GST_AUDIO_DECODER_STREAM_LOCK (self);

  if (buf->omx_buf->nFilledLen > 0) {
    GstBuffer *outbuf;
    gint nframes, spf;
    GstMapInfo minfo;
    GstOMXAudioDecClass *klass = GST_OMX_AUDIO_DEC_GET_CLASS (self);

    GST_DEBUG_OBJECT (self, "Handling output data");

    /* output must be whole frames (bytes-per-frame aligned) */
    if (buf->omx_buf->nFilledLen % self->info.bpf != 0) {
      gst_omx_port_release_buffer (port, buf);
      goto invalid_buffer;
    }

    outbuf =
        gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (self),
        buf->omx_buf->nFilledLen);

    gst_buffer_map (outbuf, &minfo, GST_MAP_WRITE);
    if (self->needs_reorder) {
      /* interleaved S16 samples: apply the reorder map computed during
       * negotiation */
      gint i, n_samples, c, n_channels;
      gint *reorder_map = self->reorder_map;
      gint16 *dest, *source;

      dest = (gint16 *) minfo.data;
      source = (gint16 *) (buf->omx_buf->pBuffer + buf->omx_buf->nOffset);
      n_samples = buf->omx_buf->nFilledLen / self->info.bpf;
      n_channels = self->info.channels;

      for (i = 0; i < n_samples; i++) {
        for (c = 0; c < n_channels; c++) {
          dest[i * n_channels + reorder_map[c]] = source[i * n_channels + c];
        }
      }
    } else {
      memcpy (minfo.data, buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
          buf->omx_buf->nFilledLen);
    }
    gst_buffer_unmap (outbuf, &minfo);

    /* convert sample count to input-frame count when the subclass knows
     * the samples-per-frame, rounding up for partial frames */
    nframes = 1;
    spf = klass->get_samples_per_frame (self, self->dec_out_port);
    if (spf != -1) {
      nframes = buf->omx_buf->nFilledLen / self->info.bpf;
      if (nframes % spf != 0)
        GST_WARNING_OBJECT (self, "Output buffer does not contain an integer "
            "number of input frames (frames: %d, spf: %d)", nframes, spf);
      nframes = (nframes + spf - 1) / spf;
    }

    GST_BUFFER_TIMESTAMP (outbuf) =
        gst_util_uint64_scale (buf->omx_buf->nTimeStamp, GST_SECOND,
        OMX_TICKS_PER_SECOND);
    if (buf->omx_buf->nTickCount != 0)
      GST_BUFFER_DURATION (outbuf) =
          gst_util_uint64_scale (buf->omx_buf->nTickCount, GST_SECOND,
          OMX_TICKS_PER_SECOND);

    flow_ret =
        gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (self), outbuf,
        nframes);
  }

  GST_DEBUG_OBJECT (self, "Read frame from component");

  GST_DEBUG_OBJECT (self, "Finished frame: %s", gst_flow_get_name (flow_ret));

  if (buf) {
    err = gst_omx_port_release_buffer (port, buf);
    if (err != OMX_ErrorNone)
      goto release_error;
  }

  self->downstream_flow_ret = flow_ret;

  if (flow_ret != GST_FLOW_OK)
    goto flow_error;

  GST_AUDIO_DECODER_STREAM_UNLOCK (self);

  return;

component_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("OpenMAX component in error state %s (0x%08x)",
            gst_omx_component_get_last_error_string (self->dec),
            gst_omx_component_get_last_error (self->dec)));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    return;
  }

flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_FLUSHING;
    self->started = FALSE;
    return;
  }

eos:
  {
    g_mutex_lock (&self->drain_lock);
    if (self->draining) {
      /* a drain is waiting on this EOS: wake it and stop the task */
      GST_DEBUG_OBJECT (self, "Drained");
      self->draining = FALSE;
      g_cond_broadcast (&self->drain_cond);
      flow_ret = GST_FLOW_OK;
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    } else {
      GST_DEBUG_OBJECT (self, "Component signalled EOS");
      flow_ret = GST_FLOW_EOS;
    }
    g_mutex_unlock (&self->drain_lock);

    GST_AUDIO_DECODER_STREAM_LOCK (self);
    self->downstream_flow_ret = flow_ret;

    /* Here we fallback and pause the task for the EOS case */
    if (flow_ret != GST_FLOW_OK)
      goto flow_error;

    GST_AUDIO_DECODER_STREAM_UNLOCK (self);

    return;
  }

flow_error:
  {
    if (flow_ret == GST_FLOW_EOS) {
      GST_DEBUG_OBJECT (self, "EOS");

      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
      self->started = FALSE;
    } else if (flow_ret < GST_FLOW_EOS) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED,
          ("Internal data stream error."), ("stream stopped, reason %s",
              gst_flow_get_name (flow_ret)));

      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
      self->started = FALSE;
    } else if (flow_ret == GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
      self->started = FALSE;
    }
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

reconfigure_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Unable to reconfigure output port"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    return;
  }

invalid_buffer:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Invalid sized input buffer"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

caps_failed:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to set caps"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    return;
  }

release_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to relase output buffer to component: %s (0x%08x)",
            gst_omx_error_to_string (err), err));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }
}
/* Decode one (framed) input buffer with the i.MX UniAudio codec.
 *
 * dec:    the GstAudioDecoder base instance (actually a GstImxAudioUniaudioDec)
 * buffer: the input buffer to decode, or NULL to drain the decoder
 *
 * The codec is fed the mapped input bytes in a loop until it has consumed the
 * whole buffer (or reports EOS / a fatal error).  Each chunk of decoded PCM is
 * copied into a freshly allocated output buffer and accumulated in
 * out_adapter; once decoding finishes, all accumulated PCM is pushed
 * downstream as one frame.  The first time decoded output becomes available,
 * the output caps (format/rate/channels/channel positions) are configured
 * from the codec's reported PCM parameters.
 *
 * Returns GST_FLOW_ERROR on a fatal codec error, otherwise the result of
 * gst_audio_decoder_finish_frame().
 */
static GstFlowReturn gst_imx_audio_uniaudio_dec_handle_frame(GstAudioDecoder *dec, GstBuffer *buffer)
{
	GstMapInfo in_map;
	GstBuffer *out_buffer;
	gsize avail_out_size;
	GstImxAudioUniaudioDec *imx_audio_uniaudio_dec = GST_IMX_AUDIO_UNIAUDIO_DEC(dec);
	int32 dec_ret;
	uint32 offset = 0;
	uint8 *in_buf = NULL;
	uint32 in_size = 0;
	gboolean dec_loop = TRUE, flow_error = FALSE;

	/* With some formats such as Vorbis, the first few buffers are actually redundant,
	 * since they contain codec data that was already specified in codec_data or
	 * streamheader caps earlier. If this is the case, skip these buffers. */
	if (imx_audio_uniaudio_dec->skip_header_counter < imx_audio_uniaudio_dec->num_vorbis_headers)
	{
		GST_TRACE_OBJECT(dec, "skipping header buffer #%u", imx_audio_uniaudio_dec->skip_header_counter);
		++imx_audio_uniaudio_dec->skip_header_counter;
		/* Consume the frame without producing output */
		return gst_audio_decoder_finish_frame(dec, NULL, 1);
	}

	/* buffer == NULL means "drain": the codec is invoked with no input below */
	if (buffer != NULL)
	{
		gst_buffer_map(buffer, &in_map, GST_MAP_READ);
		in_buf = in_map.data;
		in_size = in_map.size;
	}

	while (dec_loop)
	{
		GstBuffer *tmp_buf;
		uint8 *out_buf = NULL;
		uint32 out_size = 0;

		if (buffer != NULL)
			GST_TRACE_OBJECT(dec, "feeding %" G_GUINT32_FORMAT " bytes to the decoder", (guint32)in_size);
		else
			GST_TRACE_OBJECT(dec, "draining decoder");

		/* The codec allocates out_buf itself; it is released with
		 * gst_imx_audio_uniaudio_dec_free() after its contents are copied */
		dec_ret = imx_audio_uniaudio_dec->codec->decode_frame(
			imx_audio_uniaudio_dec->handle,
			in_buf, in_size,
			&offset,
			&out_buf, &out_size
		);

		GST_TRACE_OBJECT(dec, "decode_frame: return 0x%x offset %" G_GUINT32_FORMAT " out_size %" G_GUINT32_FORMAT, (unsigned int)dec_ret, (guint32)offset, (guint32)out_size);

		if ((out_buf != NULL) && (out_size > 0))
		{
			/* Copy the decoded PCM into a GstBuffer and accumulate it in the
			 * adapter; everything is pushed downstream in one go at the end */
			tmp_buf = gst_audio_decoder_allocate_output_buffer(dec, out_size);
			tmp_buf = gst_buffer_make_writable(tmp_buf);
			gst_buffer_fill(tmp_buf, 0, out_buf, out_size);
			gst_adapter_push(imx_audio_uniaudio_dec->out_adapter, tmp_buf);
		}

		if (out_buf != NULL)
		{
			gst_imx_audio_uniaudio_dec_free(out_buf);
		}

		/* Stop once the codec has consumed the entire input buffer */
		if ((buffer != NULL) && (offset == in_map.size))
		{
			dec_loop = FALSE;
		}

		switch (dec_ret)
		{
			case ACODEC_SUCCESS:
				break;
			case ACODEC_END_OF_STREAM:
				dec_loop = FALSE;
				break;
			case ACODEC_NOT_ENOUGH_DATA:
				break;
			case ACODEC_CAPIBILITY_CHANGE:
				break;
			default:
			{
				/* Any other return code is treated as a fatal decode error */
				dec_loop = FALSE;
				flow_error = TRUE;
				GST_ELEMENT_ERROR(dec, STREAM, DECODE, ("could not decode"), ("error message: %s", imx_audio_uniaudio_dec->codec->get_last_error(imx_audio_uniaudio_dec->handle)));
			}
		}
	}

	if (buffer != NULL)
		gst_buffer_unmap(buffer, &in_map);

	if (flow_error)
		return GST_FLOW_ERROR;

	if (!(imx_audio_uniaudio_dec->has_audioinfo_set))
	{
		UniACodecParameter parameter;
		GstAudioFormat pcm_fmt;
		GstAudioInfo audio_info;

		/* BUGFIX: the third argument was the mojibake "¶meter" (a garbled
		 * "&parameter"), which did not compile; pass the address properly */
		imx_audio_uniaudio_dec->codec->get_parameter(imx_audio_uniaudio_dec->handle, UNIA_OUTPUT_PCM_FORMAT, &parameter);

		if ((parameter.outputFormat.width == 0) || (parameter.outputFormat.depth == 0))
		{
			/* The codec has not determined the output format yet; consume the
			 * frame without producing output and wait for more data */
			GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "no output format available yet");
			return gst_audio_decoder_finish_frame(dec, NULL, 1);
		}

		GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "output sample width: %" G_GUINT32_FORMAT " depth: %" G_GUINT32_FORMAT, (guint32)(parameter.outputFormat.width), (guint32)(parameter.outputFormat.depth));
		pcm_fmt = gst_audio_format_build_integer(TRUE, G_BYTE_ORDER, parameter.outputFormat.width, parameter.outputFormat.depth);

		GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "setting output format to: %s %d Hz %d channels", gst_audio_format_to_string(pcm_fmt), (gint)(parameter.outputFormat.samplerate), (gint)(parameter.outputFormat.channels));

		/* Compute GStreamer-conformant channel positions from the codec's
		 * channel layout; a reorder map is derived from these below */
		gst_imx_audio_uniaudio_dec_clear_channel_positions(imx_audio_uniaudio_dec);
		gst_imx_audio_uniaudio_dec_fill_channel_positions(imx_audio_uniaudio_dec, parameter.outputFormat.layout, parameter.outputFormat.channels);

		imx_audio_uniaudio_dec->pcm_format = pcm_fmt;
		imx_audio_uniaudio_dec->num_channels = parameter.outputFormat.channels;

		gst_audio_info_set_format(
			&audio_info,
			pcm_fmt,
			parameter.outputFormat.samplerate,
			parameter.outputFormat.channels,
			imx_audio_uniaudio_dec->reordered_channel_positions
		);
		gst_audio_decoder_set_output_format(dec, &audio_info);

		imx_audio_uniaudio_dec->has_audioinfo_set = TRUE;
	}

	avail_out_size = gst_adapter_available(imx_audio_uniaudio_dec->out_adapter);

	if (avail_out_size > 0)
	{
		/* Take all accumulated PCM and push it downstream as one frame */
		out_buffer = gst_adapter_take_buffer(imx_audio_uniaudio_dec->out_adapter, avail_out_size);

		/* Reorder channels in-place if the codec's channel order differs
		 * from the GStreamer-conformant order announced in the caps */
		if (imx_audio_uniaudio_dec->original_channel_positions != imx_audio_uniaudio_dec->reordered_channel_positions)
		{
			gst_audio_buffer_reorder_channels(
				out_buffer,
				imx_audio_uniaudio_dec->pcm_format,
				imx_audio_uniaudio_dec->num_channels,
				imx_audio_uniaudio_dec->original_channel_positions,
				imx_audio_uniaudio_dec->reordered_channel_positions
			);
		}

		return gst_audio_decoder_finish_frame(dec, out_buffer, 1);
	}
	else
	{
		/* No output produced; still consume the input frame */
		return gst_audio_decoder_finish_frame(dec, NULL, 1);
	}
}