// Build caps describing interleaved 32-bit float raw audio for the given
// channel count and sample rate. The float rate is truncated to an int to
// match the caps field type.
GstCaps* getGstAudioCaps(int channels, float sampleRate)
{
    const char* floatFormatName = gst_audio_format_to_string(GST_AUDIO_FORMAT_F32);
    GstCaps* caps = gst_caps_new_simple("audio/x-raw",
        "rate", G_TYPE_INT, static_cast<int>(sampleRate),
        "channels", G_TYPE_INT, channels,
        "format", G_TYPE_STRING, floatFormatName,
        "layout", G_TYPE_STRING, "interleaved",
        nullptr);
    return caps;
}
// Build raw-audio caps for @channels / @sampleRate, choosing the caps layout
// appropriate to the GStreamer API version this file is compiled against.
// The float sample rate is truncated to an integer to match the caps field.
GstCaps* getGstAudioCaps(int channels, float sampleRate)
{
#ifdef GST_API_VERSION_1
    // GStreamer 1.x: one unified "audio/x-raw" media type; sample format and
    // memory layout are expressed as string fields (F32 interleaved here).
    return gst_caps_new_simple("audio/x-raw",
        "rate", G_TYPE_INT, static_cast<int>(sampleRate),
        "channels", G_TYPE_INT, channels,
        "format", G_TYPE_STRING, gst_audio_format_to_string(GST_AUDIO_FORMAT_F32),
        "layout", G_TYPE_STRING, "interleaved",
        NULL);
#else
    // GStreamer 0.10: floats use the dedicated "audio/x-raw-float" type with
    // explicit endianness and a 32-bit sample width instead of a format string.
    return gst_caps_new_simple("audio/x-raw-float",
        "rate", G_TYPE_INT, static_cast<int>(sampleRate),
        "channels", G_TYPE_INT, channels,
        "endianness", G_TYPE_INT, G_BYTE_ORDER,
        "width", G_TYPE_INT, 32,
        NULL);
#endif
}
// A decodebin pad was added; plug in a deinterleave element to separate each
// planar channel. The resulting sub-pipeline looks like:
//     ... decodebin2 ! audioconvert ! audioresample ! capsfilter ! deinterleave
void AudioFileReader::plugDeinterleave(GstPad* pad)
{
    GstElement* audioConvert = gst_element_factory_make("audioconvert", 0);
    GstElement* audioResample = gst_element_factory_make("audioresample", 0);
    GstElement* capsFilter = gst_element_factory_make("capsfilter", 0);
    m_deInterleave = gst_element_factory_make("deinterleave", "deinterleave");

    // keep-positions preserves each channel's position information on the
    // per-channel pads produced by deinterleave.
    g_object_set(m_deInterleave.get(), "keep-positions", TRUE, NULL);
    // Each new deinterleaved channel pad and the final no-more-pads signal
    // are handled by the reader's callbacks.
    g_signal_connect(m_deInterleave.get(), "pad-added", G_CALLBACK(onGStreamerDeinterleavePadAddedCallback), this);
    g_signal_connect(m_deInterleave.get(), "no-more-pads", G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this);

    // Constrain the stream to interleaved F32 at the reader's sample rate
    // before it reaches deinterleave.
    // NOTE(review): the channel count is hard-coded to 2 (stereo) here.
    GstCaps* caps = gst_caps_new_simple("audio/x-raw",
        "rate", G_TYPE_INT, static_cast<int>(m_sampleRate),
        "channels", G_TYPE_INT, 2,
        "format", G_TYPE_STRING, gst_audio_format_to_string(GST_AUDIO_FORMAT_F32),
        "layout", G_TYPE_STRING, "interleaved", nullptr);
    g_object_set(capsFilter, "caps", caps, NULL);
    gst_caps_unref(caps); // the capsfilter holds its own reference

    gst_bin_add_many(GST_BIN(m_pipeline), audioConvert, audioResample, capsFilter, m_deInterleave.get(), NULL);

    // Link the decodebin source pad into the chain; CHECK_NOTHING skips the
    // compatibility checks on these known-good static links.
    GstPad* sinkPad = gst_element_get_static_pad(audioConvert, "sink");
    gst_pad_link_full(pad, sinkPad, GST_PAD_LINK_CHECK_NOTHING);
    gst_object_unref(GST_OBJECT(sinkPad));

    gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
    gst_element_link_pads_full(audioResample, "src", capsFilter, "sink", GST_PAD_LINK_CHECK_NOTHING);
    gst_element_link_pads_full(capsFilter, "src", m_deInterleave.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);

    // Bring the newly added elements up to the running pipeline's state.
    gst_element_sync_state_with_parent(audioConvert);
    gst_element_sync_state_with_parent(audioResample);
    gst_element_sync_state_with_parent(capsFilter);
    gst_element_sync_state_with_parent(m_deInterleave.get());
}
/* Connect-time media type handler for the audio converter transform filter:
 * derive GStreamer caps describing the incoming PCM/float format, set up a
 * fixed stereo S16LE output format at the input sample rate, and connect the
 * converter's input with those caps.
 *
 * Returns S_OK for the output direction (nothing to do), E_FAIL when the
 * input type is unsupported, E_OUTOFMEMORY on allocation failure, or the
 * result of connecting the transform input. */
static HRESULT WINAPI Gstreamer_AudioConvert_SetMediaType(TransformFilter *tf, PIN_DIRECTION dir, const AM_MEDIA_TYPE *amt)
{
    GstTfImpl *This = (GstTfImpl*)tf;
    GstCaps *capsin, *capsout;
    AM_MEDIA_TYPE *outpmt = &This->tf.pmt;
    WAVEFORMATEX *inwfe;
    WAVEFORMATEX *outwfe;
    WAVEFORMATEXTENSIBLE *outwfx;
    GstAudioFormat format;
    HRESULT hr;
    BOOL inisfloat = FALSE;
    int indepth;

    TRACE("%p 0x%x %p\n", This, dir, amt);

    mark_wine_thread();

    if (dir != PINDIR_INPUT)
        return S_OK;

    if (Gstreamer_AudioConvert_QueryConnect(&This->tf, amt) == S_FALSE || !amt->pbFormat)
        return E_FAIL;

    FreeMediaType(outpmt);
    *outpmt = *amt;
    outpmt->pUnk = NULL;
    outpmt->cbFormat = sizeof(WAVEFORMATEXTENSIBLE);
    outpmt->pbFormat = CoTaskMemAlloc(outpmt->cbFormat);
    /* FIX: the allocation result was previously used unchecked; on OOM the
     * output format would have been written through a NULL pointer. */
    if (!outpmt->pbFormat)
    {
        outpmt->cbFormat = 0;
        return E_OUTOFMEMORY;
    }

    /* Determine whether the input is float and its valid bit depth; the
     * extensible header can narrow wValidBitsPerSample below the container
     * sample size. */
    inwfe = (WAVEFORMATEX*)amt->pbFormat;
    indepth = inwfe->wBitsPerSample;
    if (inwfe->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
    {
        WAVEFORMATEXTENSIBLE *inwfx = (WAVEFORMATEXTENSIBLE*)inwfe;
        inisfloat = IsEqualGUID(&inwfx->SubFormat, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT);
        if (inwfx->Samples.wValidBitsPerSample)
            indepth = inwfx->Samples.wValidBitsPerSample;
    }
    else if (inwfe->wFormatTag == WAVE_FORMAT_IEEE_FLOAT)
        inisfloat = TRUE;

    if (inisfloat)
        format = inwfe->wBitsPerSample == 64 ? GST_AUDIO_FORMAT_F64LE : GST_AUDIO_FORMAT_F32LE;
    else
        /* 8-bit PCM is unsigned in WAVE; wider widths are signed. */
        format = gst_audio_format_build_integer(inwfe->wBitsPerSample != 8,
                G_LITTLE_ENDIAN, inwfe->wBitsPerSample, indepth);

    capsin = gst_caps_new_simple("audio/x-raw",
            "format", G_TYPE_STRING, gst_audio_format_to_string(format),
            "channels", G_TYPE_INT, inwfe->nChannels,
            "rate", G_TYPE_INT, inwfe->nSamplesPerSec,
            NULL);

    /* Fixed output: stereo 16-bit little-endian PCM at the input rate. */
    outwfe = (WAVEFORMATEX*)outpmt->pbFormat;
    outwfx = (WAVEFORMATEXTENSIBLE*)outwfe;
    outwfe->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
    outwfe->nChannels = 2;
    outwfe->nSamplesPerSec = inwfe->nSamplesPerSec;
    outwfe->wBitsPerSample = 16;
    outwfe->nBlockAlign = outwfe->nChannels * outwfe->wBitsPerSample / 8;
    outwfe->nAvgBytesPerSec = outwfe->nBlockAlign * outwfe->nSamplesPerSec;
    outwfe->cbSize = sizeof(*outwfx) - sizeof(*outwfe);
    outwfx->Samples.wValidBitsPerSample = outwfe->wBitsPerSample;
    outwfx->dwChannelMask = SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT;
    outwfx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

    capsout = gst_caps_new_simple("audio/x-raw",
            "format", G_TYPE_STRING, "S16LE",
            "channels", G_TYPE_INT, outwfe->nChannels,
            "rate", G_TYPE_INT, outwfe->nSamplesPerSec,
            NULL);

    hr = Gstreamer_transform_ConnectInput(This, amt, capsin, capsout);
    gst_caps_unref(capsin);
    gst_caps_unref(capsout);

    This->cbBuffer = inwfe->nAvgBytesPerSec;
    return hr;
}
/** * gst_audio_info_to_caps: * @info: a #GstAudioInfo * * Convert the values of @info into a #GstCaps. * * Returns: (transfer full): the new #GstCaps containing the * info of @info. */ GstCaps * gst_audio_info_to_caps (const GstAudioInfo * info) { GstCaps *caps; const gchar *format; const gchar *layout; GstAudioFlags flags; g_return_val_if_fail (info != NULL, NULL); g_return_val_if_fail (info->finfo != NULL, NULL); g_return_val_if_fail (info->finfo->format != GST_AUDIO_FORMAT_UNKNOWN, NULL); format = gst_audio_format_to_string (info->finfo->format); g_return_val_if_fail (format != NULL, NULL); if (info->layout == GST_AUDIO_LAYOUT_INTERLEAVED) layout = "interleaved"; else if (info->layout == GST_AUDIO_LAYOUT_NON_INTERLEAVED) layout = "non-interleaved"; else g_return_val_if_reached (NULL); flags = info->flags; if ((flags & GST_AUDIO_FLAG_UNPOSITIONED) && info->channels > 1 && info->position[0] != GST_AUDIO_CHANNEL_POSITION_NONE) { flags &= ~GST_AUDIO_FLAG_UNPOSITIONED; g_warning ("Unpositioned audio channel position flag set but " "channel positions present"); } else if (!(flags & GST_AUDIO_FLAG_UNPOSITIONED) && info->channels > 1 && info->position[0] == GST_AUDIO_CHANNEL_POSITION_NONE) { flags |= GST_AUDIO_FLAG_UNPOSITIONED; g_warning ("Unpositioned audio channel position flag not set " "but no channel positions present"); } caps = gst_caps_new_simple ("audio/x-raw", "format", G_TYPE_STRING, format, "layout", G_TYPE_STRING, layout, "rate", G_TYPE_INT, info->rate, "channels", G_TYPE_INT, info->channels, NULL); if (info->channels > 1 || info->position[0] != GST_AUDIO_CHANNEL_POSITION_MONO) { guint64 channel_mask = 0; if ((flags & GST_AUDIO_FLAG_UNPOSITIONED)) { channel_mask = 0; } else { if (!gst_audio_channel_positions_to_mask (info->position, info->channels, TRUE, &channel_mask)) goto invalid_channel_positions; } if (info->channels == 1 && info->position[0] == GST_AUDIO_CHANNEL_POSITION_MONO) { /* Default mono special case */ } else { gst_caps_set_simple (caps, 
"channel-mask", GST_TYPE_BITMASK, channel_mask, NULL); } } return caps; invalid_channel_positions: { GST_ERROR ("Invalid channel positions"); gst_caps_unref (caps); return NULL; } }
// Build GStreamer caps describing @format, or return 0 when the format is
// invalid or has no matching GStreamer representation.
GstCaps *QGstUtils::capsForAudioFormat(const QAudioFormat &format)
{
    if (!format.isValid())
        return 0;

#if GST_CHECK_VERSION(1,0,0)
    // GStreamer 1.x: find the (sampleType, byteOrder, sampleSize) triple in
    // the lookup table and emit its GstAudioFormat as an "audio/x-raw" caps.
    const QAudioFormat::SampleType sampleType = format.sampleType();
    const QAudioFormat::Endian byteOrder = format.byteOrder();
    const int sampleSize = format.sampleSize();

    for (int i = 0; i < lengthOf(qt_audioLookup); ++i) {
        if (qt_audioLookup[i].sampleType != sampleType
                || qt_audioLookup[i].byteOrder != byteOrder
                || qt_audioLookup[i].sampleSize != sampleSize) {
            continue;
        }

        return gst_caps_new_simple(
                    "audio/x-raw",
                    "format"  , G_TYPE_STRING, gst_audio_format_to_string(qt_audioLookup[i].format),
                    "rate"    , G_TYPE_INT   , format.sampleRate(),
                    "channels", G_TYPE_INT   , format.channelCount(),
                    NULL);
    }
    // No table entry matched the requested format.
    return 0;
#else
    // GStreamer 0.10: build the structure field-by-field; integer and float
    // samples use distinct media types.
    GstStructure *structure = 0;

    // NOTE(review): this isValid() check is redundant — the guard at the top
    // of the function already returned for invalid formats.
    if (format.isValid()) {
        if (format.sampleType() == QAudioFormat::SignedInt
                || format.sampleType() == QAudioFormat::UnSignedInt) {
            structure = gst_structure_new("audio/x-raw-int", NULL);
        } else if (format.sampleType() == QAudioFormat::Float) {
            structure = gst_structure_new("audio/x-raw-float", NULL);
        }
    }

    GstCaps *caps = 0;

    if (structure) {
        gst_structure_set(structure, "rate", G_TYPE_INT, format.sampleRate(), NULL);
        gst_structure_set(structure, "channels", G_TYPE_INT, format.channelCount(), NULL);
        gst_structure_set(structure, "width", G_TYPE_INT, format.sampleSize(), NULL);
        gst_structure_set(structure, "depth", G_TYPE_INT, format.sampleSize(), NULL);

        // 1234/4321 are the 0.10 byte-order markers for LE/BE.
        if (format.byteOrder() == QAudioFormat::LittleEndian)
            gst_structure_set(structure, "endianness", G_TYPE_INT, 1234, NULL);
        else if (format.byteOrder() == QAudioFormat::BigEndian)
            gst_structure_set(structure, "endianness", G_TYPE_INT, 4321, NULL);

        // "signed" only applies to the integer media type; the float branch
        // leaves it unset.
        if (format.sampleType() == QAudioFormat::SignedInt)
            gst_structure_set(structure, "signed", G_TYPE_BOOLEAN, TRUE, NULL);
        else if (format.sampleType() == QAudioFormat::UnSignedInt)
            gst_structure_set(structure, "signed", G_TYPE_BOOLEAN, FALSE, NULL);

        caps = gst_caps_new_empty();
        Q_ASSERT(caps);
        gst_caps_append_structure(caps, structure); // caps takes ownership
    }

    return caps;
#endif
}
/* Source-pad streaming task: pull one decoded buffer from the OMX output
 * port, renegotiate caps when the port settings changed, copy (and, when
 * required, channel-reorder) the PCM data into a GstBuffer, and push it
 * downstream via gst_audio_decoder_finish_frame(). All error paths pause
 * the task and record the flow state in self->downstream_flow_ret. */
static void
gst_omx_audio_dec_loop (GstOMXAudioDec * self)
{
  GstOMXPort *port = self->dec_out_port;
  GstOMXBuffer *buf = NULL;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstOMXAcquireBufferReturn acq_return;
  /* FIX: klass was declared inside the caps-update block (and again in the
   * output-data block) but is also used at function scope below in the
   * empty-EOS-buffer assertion — an out-of-scope reference. Declare it once
   * at function scope, as upstream gst-omx does. */
  GstOMXAudioDecClass *klass = GST_OMX_AUDIO_DEC_GET_CLASS (self);
  OMX_ERRORTYPE err;

  acq_return = gst_omx_port_acquire_buffer (port, &buf);
  if (acq_return == GST_OMX_ACQUIRE_BUFFER_ERROR) {
    goto component_error;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_FLUSHING) {
    goto flushing;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_EOS) {
    goto eos;
  }

  if (!gst_pad_has_current_caps (GST_AUDIO_DECODER_SRC_PAD (self)) ||
      acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
    OMX_PARAM_PORTDEFINITIONTYPE port_def;
    OMX_AUDIO_PARAM_PCMMODETYPE pcm_param;
    GstAudioChannelPosition omx_position[OMX_AUDIO_MAXCHANNELS];
    gint i;

    GST_DEBUG_OBJECT (self, "Port settings have changed, updating caps");

    /* Reallocate all buffers */
    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE
        && gst_omx_port_is_enabled (port)) {
      err = gst_omx_port_set_enabled (port, FALSE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_buffers_released (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_deallocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 1 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    /* Just update caps */
    GST_AUDIO_DECODER_STREAM_LOCK (self);

    gst_omx_port_get_port_definition (port, &port_def);
    g_assert (port_def.format.audio.eEncoding == OMX_AUDIO_CodingPCM);

    GST_OMX_INIT_STRUCT (&pcm_param);
    pcm_param.nPortIndex = self->dec_out_port->index;
    err =
        gst_omx_component_get_parameter (self->dec, OMX_IndexParamAudioPcm,
        &pcm_param);
    if (err != OMX_ErrorNone) {
      GST_ERROR_OBJECT (self, "Failed to get PCM parameters: %s (0x%08x)",
          gst_omx_error_to_string (err), err);
      goto caps_failed;
    }

    g_assert (pcm_param.ePCMMode == OMX_AUDIO_PCMModeLinear);
    g_assert (pcm_param.bInterleaved == OMX_TRUE);

    gst_audio_info_init (&self->info);

    /* Map the OMX channel layout to GStreamer channel positions; any
     * unknown/None entry invalidates the whole layout. */
    for (i = 0; i < pcm_param.nChannels; i++) {
      switch (pcm_param.eChannelMapping[i]) {
        case OMX_AUDIO_ChannelLF:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT;
          break;
        case OMX_AUDIO_ChannelRF:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT;
          break;
        case OMX_AUDIO_ChannelCF:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER;
          break;
        case OMX_AUDIO_ChannelLS:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT;
          break;
        case OMX_AUDIO_ChannelRS:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT;
          break;
        case OMX_AUDIO_ChannelLFE:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_LFE1;
          break;
        case OMX_AUDIO_ChannelCS:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_REAR_CENTER;
          break;
        case OMX_AUDIO_ChannelLR:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_REAR_LEFT;
          break;
        case OMX_AUDIO_ChannelRR:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT;
          break;
        case OMX_AUDIO_ChannelNone:
        default:
          /* This will break the outer loop too as the
           * i == pcm_param.nChannels afterwards */
          for (i = 0; i < pcm_param.nChannels; i++)
            omx_position[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
          break;
      }
    }

    if (pcm_param.nChannels == 1
        && omx_position[0] == GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER)
      omx_position[0] = GST_AUDIO_CHANNEL_POSITION_MONO;

    if (omx_position[0] == GST_AUDIO_CHANNEL_POSITION_NONE
        && klass->get_channel_positions) {
      GST_WARNING_OBJECT (self,
          "Failed to get a valid channel layout, trying fallback");
      klass->get_channel_positions (self, self->dec_out_port, omx_position);
    }

    /* GStreamer requires positions in a canonical order; remember whether a
     * per-sample reorder is needed and precompute the reorder map. */
    memcpy (self->position, omx_position, sizeof (omx_position));
    gst_audio_channel_positions_to_valid_order (self->position,
        pcm_param.nChannels);
    self->needs_reorder =
        (memcmp (self->position, omx_position,
            sizeof (GstAudioChannelPosition) * pcm_param.nChannels) != 0);
    if (self->needs_reorder)
      gst_audio_get_channel_reorder_map (pcm_param.nChannels, self->position,
          omx_position, self->reorder_map);

    gst_audio_info_set_format (&self->info,
        gst_audio_format_build_integer (pcm_param.eNumData ==
            OMX_NumericalDataSigned,
            pcm_param.eEndian == OMX_EndianLittle ? G_LITTLE_ENDIAN :
            G_BIG_ENDIAN, pcm_param.nBitPerSample, pcm_param.nBitPerSample),
        pcm_param.nSamplingRate, pcm_param.nChannels, self->position);

    GST_DEBUG_OBJECT (self,
        "Setting output state: format %s, rate %u, channels %u",
        gst_audio_format_to_string (self->info.finfo->format),
        (guint) pcm_param.nSamplingRate, (guint) pcm_param.nChannels);

    if (!gst_audio_decoder_set_output_format (GST_AUDIO_DECODER (self),
            &self->info)
        || !gst_audio_decoder_negotiate (GST_AUDIO_DECODER (self))) {
      if (buf)
        gst_omx_port_release_buffer (port, buf);
      goto caps_failed;
    }

    GST_AUDIO_DECODER_STREAM_UNLOCK (self);

    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
      err = gst_omx_port_set_enabled (port, TRUE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_allocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_populate (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_mark_reconfigured (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    /* Now get a buffer */
    if (acq_return != GST_OMX_ACQUIRE_BUFFER_OK) {
      return;
    }
  }

  g_assert (acq_return == GST_OMX_ACQUIRE_BUFFER_OK);

  /* A NULL buffer is only legal for components flagged with the
   * no-empty-EOS-buffer hack; treat it as EOS. */
  if (!buf) {
    g_assert ((klass->cdata.hacks & GST_OMX_HACK_NO_EMPTY_EOS_BUFFER));
    GST_AUDIO_DECODER_STREAM_LOCK (self);
    goto eos;
  }

  /* This prevents a deadlock between the srcpad stream
   * lock and the audiocodec stream lock, if ::reset()
   * is called at the wrong time */
  if (gst_omx_port_is_flushing (port)) {
    GST_DEBUG_OBJECT (self, "Flushing");
    gst_omx_port_release_buffer (port, buf);
    goto flushing;
  }

  GST_DEBUG_OBJECT (self, "Handling buffer: 0x%08x %" G_GUINT64_FORMAT,
      (guint) buf->omx_buf->nFlags, (guint64) buf->omx_buf->nTimeStamp);

  GST_AUDIO_DECODER_STREAM_LOCK (self);

  if (buf->omx_buf->nFilledLen > 0) {
    GstBuffer *outbuf;
    gint nframes, spf;
    GstMapInfo minfo;

    GST_DEBUG_OBJECT (self, "Handling output data");

    /* The payload must be a whole number of sample frames. */
    if (buf->omx_buf->nFilledLen % self->info.bpf != 0) {
      gst_omx_port_release_buffer (port, buf);
      goto invalid_buffer;
    }

    outbuf =
        gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (self),
        buf->omx_buf->nFilledLen);

    gst_buffer_map (outbuf, &minfo, GST_MAP_WRITE);
    if (self->needs_reorder) {
      /* Interleave-copy sample by sample, permuting channels through the
       * precomputed reorder map (16-bit samples). */
      gint i, n_samples, c, n_channels;
      gint *reorder_map = self->reorder_map;
      gint16 *dest, *source;

      dest = (gint16 *) minfo.data;
      source = (gint16 *) (buf->omx_buf->pBuffer + buf->omx_buf->nOffset);
      n_samples = buf->omx_buf->nFilledLen / self->info.bpf;
      n_channels = self->info.channels;

      for (i = 0; i < n_samples; i++) {
        for (c = 0; c < n_channels; c++) {
          dest[i * n_channels + reorder_map[c]] = source[i * n_channels + c];
        }
      }
    } else {
      memcpy (minfo.data, buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
          buf->omx_buf->nFilledLen);
    }
    gst_buffer_unmap (outbuf, &minfo);

    /* Subclasses that know the codec frame size report it so we can tell
     * the base class how many input frames this buffer completes. */
    nframes = 1;
    spf = klass->get_samples_per_frame (self, self->dec_out_port);
    if (spf != -1) {
      nframes = buf->omx_buf->nFilledLen / self->info.bpf;
      if (nframes % spf != 0)
        GST_WARNING_OBJECT (self, "Output buffer does not contain an integer "
            "number of input frames (frames: %d, spf: %d)", nframes, spf);
      nframes = (nframes + spf - 1) / spf;
    }

    GST_BUFFER_TIMESTAMP (outbuf) =
        gst_util_uint64_scale (buf->omx_buf->nTimeStamp, GST_SECOND,
        OMX_TICKS_PER_SECOND);
    if (buf->omx_buf->nTickCount != 0)
      GST_BUFFER_DURATION (outbuf) =
          gst_util_uint64_scale (buf->omx_buf->nTickCount, GST_SECOND,
          OMX_TICKS_PER_SECOND);

    flow_ret =
        gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (self), outbuf,
        nframes);
  }

  GST_DEBUG_OBJECT (self, "Read frame from component");

  GST_DEBUG_OBJECT (self, "Finished frame: %s", gst_flow_get_name (flow_ret));

  if (buf) {
    err = gst_omx_port_release_buffer (port, buf);
    if (err != OMX_ErrorNone)
      goto release_error;
  }

  self->downstream_flow_ret = flow_ret;

  if (flow_ret != GST_FLOW_OK)
    goto flow_error;

  GST_AUDIO_DECODER_STREAM_UNLOCK (self);

  return;

component_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("OpenMAX component in error state %s (0x%08x)",
            gst_omx_component_get_last_error_string (self->dec),
            gst_omx_component_get_last_error (self->dec)));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    return;
  }

flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_FLUSHING;
    self->started = FALSE;
    return;
  }

eos:
  {
    g_mutex_lock (&self->drain_lock);
    if (self->draining) {
      GST_DEBUG_OBJECT (self, "Drained");
      self->draining = FALSE;
      g_cond_broadcast (&self->drain_cond);
      flow_ret = GST_FLOW_OK;
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    } else {
      GST_DEBUG_OBJECT (self, "Component signalled EOS");
      flow_ret = GST_FLOW_EOS;
    }
    g_mutex_unlock (&self->drain_lock);

    GST_AUDIO_DECODER_STREAM_LOCK (self);
    self->downstream_flow_ret = flow_ret;

    /* Here we fallback and pause the task for the EOS case */
    if (flow_ret != GST_FLOW_OK)
      goto flow_error;

    GST_AUDIO_DECODER_STREAM_UNLOCK (self);

    return;
  }

flow_error:
  {
    if (flow_ret == GST_FLOW_EOS) {
      GST_DEBUG_OBJECT (self, "EOS");

      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
      self->started = FALSE;
    } else if (flow_ret < GST_FLOW_EOS) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, ("Internal data stream error."),
          ("stream stopped, reason %s", gst_flow_get_name (flow_ret)));

      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
      self->started = FALSE;
    } else if (flow_ret == GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
      self->started = FALSE;
    }
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

reconfigure_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Unable to reconfigure output port"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    return;
  }

invalid_buffer:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Invalid sized input buffer"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

caps_failed:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to set caps"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    return;
  }

release_error:
  {
    /* FIX: corrected "relase" -> "release" in the error message. */
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to release output buffer to component: %s (0x%08x)",
            gst_omx_error_to_string (err), err));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
        gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }
}
/* GstAudioDecoder::handle_frame implementation: feed @buffer (or drain when
 * @buffer is NULL) through the UniAudio codec, accumulate decoded PCM in the
 * output adapter, configure the output audio info the first time a valid
 * format is reported, and finish exactly one frame per call. */
static GstFlowReturn gst_imx_audio_uniaudio_dec_handle_frame(GstAudioDecoder *dec, GstBuffer *buffer)
{
	GstMapInfo in_map;
	GstBuffer *out_buffer;
	gsize avail_out_size;
	GstImxAudioUniaudioDec *imx_audio_uniaudio_dec = GST_IMX_AUDIO_UNIAUDIO_DEC(dec);
	int32 dec_ret;
	uint32 offset = 0;
	uint8 *in_buf = NULL;
	uint32 in_size = 0;
	gboolean dec_loop = TRUE, flow_error = FALSE;

	/* With some formats such as Vorbis, the first few buffers are actually redundant,
	 * since they contain codec data that was already specified in codec_data or
	 * streamheader caps earlier. If this is the case, skip these buffers. */
	if (imx_audio_uniaudio_dec->skip_header_counter < imx_audio_uniaudio_dec->num_vorbis_headers)
	{
		GST_TRACE_OBJECT(dec, "skipping header buffer #%u", imx_audio_uniaudio_dec->skip_header_counter);
		++imx_audio_uniaudio_dec->skip_header_counter;
		return gst_audio_decoder_finish_frame(dec, NULL, 1);
	}

	/* A NULL buffer means "drain the decoder" - nothing to map then. */
	if (buffer != NULL)
	{
		gst_buffer_map(buffer, &in_map, GST_MAP_READ);
		in_buf = in_map.data;
		in_size = in_map.size;
	}

	/* Keep calling decode_frame until the input is consumed, the codec
	 * reports end-of-stream, or an unrecoverable error occurs. */
	while (dec_loop)
	{
		GstBuffer *tmp_buf;
		uint8 *out_buf = NULL;
		uint32 out_size = 0;

		if (buffer != NULL)
			GST_TRACE_OBJECT(dec, "feeding %" G_GUINT32_FORMAT " bytes to the decoder", (guint32)in_size);
		else
			GST_TRACE_OBJECT(dec, "draining decoder");

		dec_ret = imx_audio_uniaudio_dec->codec->decode_frame(
			imx_audio_uniaudio_dec->handle,
			in_buf, in_size,
			&offset,
			&out_buf, &out_size
		);

		GST_TRACE_OBJECT(dec, "decode_frame: return 0x%x offset %" G_GUINT32_FORMAT " out_size %" G_GUINT32_FORMAT, (unsigned int)dec_ret, (guint32)offset, (guint32)out_size);

		/* Copy any decoded PCM into a fresh output buffer and queue it in
		 * the adapter; the codec-owned out_buf is freed separately below. */
		if ((out_buf != NULL) && (out_size > 0))
		{
			tmp_buf = gst_audio_decoder_allocate_output_buffer(dec, out_size);
			tmp_buf = gst_buffer_make_writable(tmp_buf);
			gst_buffer_fill(tmp_buf, 0, out_buf, out_size);
			gst_adapter_push(imx_audio_uniaudio_dec->out_adapter, tmp_buf);
		}

		if (out_buf != NULL)
		{
			gst_imx_audio_uniaudio_dec_free(out_buf);
		}

		/* Stop once the codec has consumed the whole mapped input. */
		if ((buffer != NULL) && (offset == in_map.size))
		{
			dec_loop = FALSE;
		}

		switch (dec_ret)
		{
			case ACODEC_SUCCESS:
				break;
			case ACODEC_END_OF_STREAM:
				dec_loop = FALSE;
				break;
			case ACODEC_NOT_ENOUGH_DATA:
				break;
			case ACODEC_CAPIBILITY_CHANGE:
				break;
			default:
			{
				dec_loop = FALSE;
				flow_error = TRUE;
				GST_ELEMENT_ERROR(dec, STREAM, DECODE, ("could not decode"), ("error message: %s", imx_audio_uniaudio_dec->codec->get_last_error(imx_audio_uniaudio_dec->handle)));
			}
		}
	}

	if (buffer != NULL)
		gst_buffer_unmap(buffer, &in_map);

	if (flow_error)
		return GST_FLOW_ERROR;

	/* Configure the output format once the codec reports a usable one. */
	if (!(imx_audio_uniaudio_dec->has_audioinfo_set))
	{
		UniACodecParameter parameter;
		GstAudioFormat pcm_fmt;
		GstAudioInfo audio_info;

		/* FIX: the third argument had been mangled to the HTML-entity
		 * residue "¶meter"; restored to "&parameter". */
		imx_audio_uniaudio_dec->codec->get_parameter(imx_audio_uniaudio_dec->handle, UNIA_OUTPUT_PCM_FORMAT, &parameter);

		if ((parameter.outputFormat.width == 0) || (parameter.outputFormat.depth == 0))
		{
			GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "no output format available yet");
			return gst_audio_decoder_finish_frame(dec, NULL, 1);
		}

		GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "output sample width: %" G_GUINT32_FORMAT " depth: %" G_GUINT32_FORMAT, (guint32)(parameter.outputFormat.width), (guint32)(parameter.outputFormat.depth));
		pcm_fmt = gst_audio_format_build_integer(TRUE, G_BYTE_ORDER, parameter.outputFormat.width, parameter.outputFormat.depth);

		GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "setting output format to: %s %d Hz %d channels", gst_audio_format_to_string(pcm_fmt), (gint)(parameter.outputFormat.samplerate), (gint)(parameter.outputFormat.channels));

		gst_imx_audio_uniaudio_dec_clear_channel_positions(imx_audio_uniaudio_dec);
		gst_imx_audio_uniaudio_dec_fill_channel_positions(imx_audio_uniaudio_dec, parameter.outputFormat.layout, parameter.outputFormat.channels);

		imx_audio_uniaudio_dec->pcm_format = pcm_fmt;
		imx_audio_uniaudio_dec->num_channels = parameter.outputFormat.channels;

		gst_audio_info_set_format(
			&audio_info,
			pcm_fmt,
			parameter.outputFormat.samplerate,
			parameter.outputFormat.channels,
			imx_audio_uniaudio_dec->reordered_channel_positions
		);
		gst_audio_decoder_set_output_format(dec, &audio_info);

		imx_audio_uniaudio_dec->has_audioinfo_set = TRUE;
	}

	/* Hand everything accumulated in the adapter downstream as one frame,
	 * reordering channels first if the codec's layout differs from the
	 * canonical GStreamer order. */
	avail_out_size = gst_adapter_available(imx_audio_uniaudio_dec->out_adapter);

	if (avail_out_size > 0)
	{
		out_buffer = gst_adapter_take_buffer(imx_audio_uniaudio_dec->out_adapter, avail_out_size);
		if (imx_audio_uniaudio_dec->original_channel_positions != imx_audio_uniaudio_dec->reordered_channel_positions)
		{
			gst_audio_buffer_reorder_channels(
				out_buffer,
				imx_audio_uniaudio_dec->pcm_format,
				imx_audio_uniaudio_dec->num_channels,
				imx_audio_uniaudio_dec->original_channel_positions,
				imx_audio_uniaudio_dec->reordered_channel_positions
			);
		}
		return gst_audio_decoder_finish_frame(dec, out_buffer, 1);
	}
	else
	{
		return gst_audio_decoder_finish_frame(dec, NULL, 1);
	}
}