Example 1
static void
gst_flac_dec_metadata_cb (const FLAC__StreamDecoder * decoder,
    const FLAC__StreamMetadata * metadata, void *client_data)
{
  GstFlacDec *flacdec = GST_FLAC_DEC (client_data);

  GST_LOG_OBJECT (flacdec, "metadata type: %d", metadata->type);

  switch (metadata->type) {
    case FLAC__METADATA_TYPE_STREAMINFO:{
      gint64 samples;
      guint depth, width, gdepth;

      samples = metadata->data.stream_info.total_samples;

      flacdec->min_blocksize = metadata->data.stream_info.min_blocksize;
      flacdec->max_blocksize = metadata->data.stream_info.max_blocksize;
      flacdec->depth = depth = metadata->data.stream_info.bits_per_sample;

      if (depth < 9) {
        gdepth = width = 8;
      } else if (depth < 17) {
        gdepth = width = 16;
      } else if (depth < 25) {
        gdepth = 24;
        width = 32;
      } else {
        gdepth = width = 32;
      }

      gst_audio_info_set_format (&flacdec->info,
          gst_audio_format_build_integer (TRUE, G_BYTE_ORDER, width, gdepth),
          metadata->data.stream_info.sample_rate,
          metadata->data.stream_info.channels, NULL);

      memcpy (flacdec->info.position,
          channel_positions[flacdec->info.channels - 1],
          sizeof (GstAudioChannelPosition) * flacdec->info.channels);
      gst_audio_channel_positions_to_valid_order (flacdec->info.position,
          flacdec->info.channels);
      /* Note: we create the inverse reordering map here */
      gst_audio_get_channel_reorder_map (flacdec->info.channels,
          flacdec->info.position, channel_positions[flacdec->info.channels - 1],
          flacdec->channel_reorder_map);

      GST_DEBUG_OBJECT (flacdec, "blocksize: min=%u, max=%u",
          flacdec->min_blocksize, flacdec->max_blocksize);
      GST_DEBUG_OBJECT (flacdec, "sample rate: %u, channels: %u",
          flacdec->info.rate, flacdec->info.channels);
      GST_DEBUG_OBJECT (flacdec, "depth: %u, width: %u", flacdec->depth,
          flacdec->info.finfo->width);

      GST_DEBUG_OBJECT (flacdec, "total samples = %" G_GINT64_FORMAT, samples);
      break;
    }
    default:
      break;
  }
}
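The common thread in these examples is gst_audio_format_build_integer(), which picks a packed integer GstAudioFormat from signedness, endianness, physical width and number of valid bits. A minimal, self-contained sketch of the depth bucketing used above (built against gstreamer-audio-1.0; the list of depths is illustrative):

#include <gst/gst.h>
#include <gst/audio/audio.h>

int
main (int argc, char **argv)
{
  guint depths[] = { 8, 12, 16, 20, 24, 32 };
  guint i;

  gst_init (&argc, &argv);

  for (i = 0; i < G_N_ELEMENTS (depths); i++) {
    guint depth = depths[i], width, gdepth;
    GstAudioFormat fmt;

    /* Same bucketing as gst_flac_dec_metadata_cb: up to 16 valid bits stay
     * packed, 17-24 bits are carried in 32-bit words with 24 valid bits */
    if (depth < 9) {
      gdepth = width = 8;
    } else if (depth < 17) {
      gdepth = width = 16;
    } else if (depth < 25) {
      gdepth = 24;
      width = 32;
    } else {
      gdepth = width = 32;
    }

    fmt = gst_audio_format_build_integer (TRUE, G_BYTE_ORDER, width, gdepth);
    g_print ("depth %2u -> %s\n", depth, gst_audio_format_to_string (fmt));
  }

  return 0;
}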
Example 2
/* Converts an AudioStreamBasicDescription to preferred caps.
 *
 * These caps will indicate the AU element's canonical format, so that
 * Core Audio will neither resample nor convert.
 *
 * NOTE ON MULTI-CHANNEL AUDIO:
 *
 * If layout is not NULL, resulting caps will only include the subset
 * of channels supported by GStreamer. If the Core Audio layout contained
 * ANY positioned channels, then ONLY positioned channels will be included
 * in the resulting caps. Otherwise, resulting caps will be unpositioned,
 * and include only unpositioned channels.
 * (Channels with unsupported AudioChannelLabel will be skipped either way.)
 *
 * Naturally, the number of channels indicated by 'channels' can be lower
 * than the AU element's total number of channels.
 */
GstCaps *
gst_core_audio_asbd_to_caps (AudioStreamBasicDescription * asbd,
    AudioChannelLayout * layout)
{
  GstAudioInfo info;
  GstAudioFormat format = GST_AUDIO_FORMAT_UNKNOWN;
  guint rate, channels, bps, endianness;
  guint64 channel_mask;
  gboolean sign, interleaved;
  GstAudioChannelPosition pos[GST_OSX_AUDIO_MAX_CHANNEL];

  if (asbd->mFormatID != kAudioFormatLinearPCM) {
    GST_WARNING ("Only linear PCM is supported");
    goto error;
  }

  if (!(asbd->mFormatFlags & kAudioFormatFlagIsPacked)) {
    GST_WARNING ("Only packed formats supported");
    goto error;
  }

  if (asbd->mFormatFlags & kLinearPCMFormatFlagsSampleFractionMask) {
    GST_WARNING ("Fixed point audio is unsupported");
    goto error;
  }

  rate = asbd->mSampleRate;
  if (rate == kAudioStreamAnyRate) {
    GST_WARNING ("No sample rate");
    goto error;
  }

  bps = asbd->mBitsPerChannel;
  endianness = asbd->mFormatFlags & kAudioFormatFlagIsBigEndian ?
      G_BIG_ENDIAN : G_LITTLE_ENDIAN;
  sign = asbd->mFormatFlags & kAudioFormatFlagIsSignedInteger ? TRUE : FALSE;
  interleaved = asbd->mFormatFlags & kAudioFormatFlagIsNonInterleaved ?
      FALSE : TRUE;

  if (asbd->mFormatFlags & kAudioFormatFlagIsFloat) {
    if (bps == 32) {
      if (endianness == G_LITTLE_ENDIAN)
        format = GST_AUDIO_FORMAT_F32LE;
      else
        format = GST_AUDIO_FORMAT_F32BE;

    } else if (bps == 64) {
      if (endianness == G_LITTLE_ENDIAN)
        format = GST_AUDIO_FORMAT_F64LE;
      else
        format = GST_AUDIO_FORMAT_F64BE;
    }
  } else {
    format = gst_audio_format_build_integer (sign, endianness, bps, bps);
  }

  if (format == GST_AUDIO_FORMAT_UNKNOWN) {
    GST_WARNING ("Unsupported sample format");
    goto error;
  }

  if (layout) {
    if (!gst_core_audio_parse_channel_layout (layout, &channels, &channel_mask,
            pos)) {
      GST_WARNING
          ("Failed to parse channel layout, falling back to best-effort channel mapping");
      layout = NULL;
    }
  }

  if (layout) {
    /* The AU can have arbitrary channel order, but we're using GstAudioInfo
     * which supports only the GStreamer channel order.
     * Also, we're eventually producing caps, which only have channel-mask
     * (whose implied order is the GStreamer channel order). */
    gst_audio_channel_positions_to_valid_order (pos, channels);

    gst_audio_info_set_format (&info, format, rate, channels, pos);
  } else {
    channels = MIN (asbd->mChannelsPerFrame, GST_OSX_AUDIO_MAX_CHANNEL);
    gst_audio_info_set_format (&info, format, rate, channels, NULL);
  }

  return gst_audio_info_to_caps (&info);

error:
  return NULL;
}
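Worth noting from the function above: caps can only carry a channel-mask, whose implied order is GStreamer's canonical one, so arbitrary Core Audio positions have to be normalized with gst_audio_channel_positions_to_valid_order() before they go into GstAudioInfo. A small sketch of that step in isolation (the three-channel layout is invented for illustration):

#include <gst/gst.h>
#include <gst/audio/audio.h>

int
main (int argc, char **argv)
{
  GstAudioInfo info;
  GstCaps *caps;
  gchar *str;
  /* Hypothetical layout as it might come from a device: centre first */
  GstAudioChannelPosition pos[3] = {
    GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER,
    GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT,
    GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT
  };

  gst_init (&argc, &argv);

  /* Reorder the positions into GStreamer's canonical order; the caps will
   * only express the set of channels via a channel-mask */
  gst_audio_channel_positions_to_valid_order (pos, 3);

  gst_audio_info_init (&info);
  gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_F32LE, 48000, 3, pos);

  caps = gst_audio_info_to_caps (&info);
  str = gst_caps_to_string (caps);
  g_print ("%s\n", str);

  g_free (str);
  gst_caps_unref (caps);
  return 0;
}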
Example 3
static HRESULT WINAPI Gstreamer_AudioConvert_SetMediaType(TransformFilter *tf, PIN_DIRECTION dir, const AM_MEDIA_TYPE *amt)
{
    GstTfImpl *This = (GstTfImpl*)tf;
    GstCaps *capsin, *capsout;
    AM_MEDIA_TYPE *outpmt = &This->tf.pmt;
    WAVEFORMATEX *inwfe;
    WAVEFORMATEX *outwfe;
    WAVEFORMATEXTENSIBLE *outwfx;
    GstAudioFormat format;
    HRESULT hr;
    BOOL inisfloat = FALSE;
    int indepth;

    TRACE("%p 0x%x %p\n", This, dir, amt);

    mark_wine_thread();

    if (dir != PINDIR_INPUT)
        return S_OK;

    if (Gstreamer_AudioConvert_QueryConnect(&This->tf, amt) == S_FALSE || !amt->pbFormat)
        return E_FAIL;

    FreeMediaType(outpmt);
    *outpmt = *amt;
    outpmt->pUnk = NULL;
    outpmt->cbFormat = sizeof(WAVEFORMATEXTENSIBLE);
    outpmt->pbFormat = CoTaskMemAlloc(outpmt->cbFormat);

    inwfe = (WAVEFORMATEX*)amt->pbFormat;
    indepth = inwfe->wBitsPerSample;
    if (inwfe->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
        WAVEFORMATEXTENSIBLE *inwfx = (WAVEFORMATEXTENSIBLE*)inwfe;
        inisfloat = IsEqualGUID(&inwfx->SubFormat, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT);
        if (inwfx->Samples.wValidBitsPerSample)
            indepth = inwfx->Samples.wValidBitsPerSample;
    } else if (inwfe->wFormatTag == WAVE_FORMAT_IEEE_FLOAT)
        inisfloat = TRUE;

    if (inisfloat)
        format = inwfe->wBitsPerSample == 64 ? GST_AUDIO_FORMAT_F64LE : GST_AUDIO_FORMAT_F32LE;
    else
        format = gst_audio_format_build_integer(inwfe->wBitsPerSample != 8, G_LITTLE_ENDIAN,
                inwfe->wBitsPerSample, indepth);

    capsin = gst_caps_new_simple("audio/x-raw",
                                 "format", G_TYPE_STRING, gst_audio_format_to_string(format),
                                 "channels", G_TYPE_INT, inwfe->nChannels,
                                 "rate", G_TYPE_INT, inwfe->nSamplesPerSec,
                                  NULL);

    outwfe = (WAVEFORMATEX*)outpmt->pbFormat;
    outwfx = (WAVEFORMATEXTENSIBLE*)outwfe;
    outwfe->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
    outwfe->nChannels = 2;
    outwfe->nSamplesPerSec = inwfe->nSamplesPerSec;
    outwfe->wBitsPerSample = 16;
    outwfe->nBlockAlign = outwfe->nChannels * outwfe->wBitsPerSample / 8;
    outwfe->nAvgBytesPerSec = outwfe->nBlockAlign * outwfe->nSamplesPerSec;
    outwfe->cbSize = sizeof(*outwfx) - sizeof(*outwfe);
    outwfx->Samples.wValidBitsPerSample = outwfe->wBitsPerSample;
    outwfx->dwChannelMask = SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT;
    outwfx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

    capsout = gst_caps_new_simple("audio/x-raw",
                                  "format", G_TYPE_STRING, "S16LE",
                                  "channels", G_TYPE_INT, outwfe->nChannels,
                                  "rate", G_TYPE_INT, outwfe->nSamplesPerSec,
                                   NULL);


    hr = Gstreamer_transform_ConnectInput(This, amt, capsin, capsout);
    gst_caps_unref(capsin);
    gst_caps_unref(capsout);

    This->cbBuffer = inwfe->nAvgBytesPerSec;
    return hr;
}
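For the WAVE side of the mapping above, the relevant detail is that 8-bit PCM is unsigned while wider PCM is signed little-endian, and that WAVEFORMATEXTENSIBLE may declare fewer valid bits than the container width. A hedged sketch of just that mapping (the helper name and sample values are invented for illustration):

#include <gst/gst.h>
#include <gst/audio/audio.h>

/* Hypothetical helper: container width and valid bits as they would come
 * from wBitsPerSample and Samples.wValidBitsPerSample */
static const gchar *
wave_pcm_to_gst_format (guint bits_per_sample, guint valid_bits)
{
  GstAudioFormat fmt = gst_audio_format_build_integer (bits_per_sample != 8,
      G_LITTLE_ENDIAN, bits_per_sample, valid_bits);

  return gst_audio_format_to_string (fmt);
}

int
main (int argc, char **argv)
{
  gst_init (&argc, &argv);

  g_print ("8/8   -> %s\n", wave_pcm_to_gst_format (8, 8));    /* U8 */
  g_print ("16/16 -> %s\n", wave_pcm_to_gst_format (16, 16));  /* S16LE */
  g_print ("32/24 -> %s\n", wave_pcm_to_gst_format (32, 24));  /* S24_32LE */

  return 0;
}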
Example 4
static FLAC__StreamDecoderWriteStatus
gst_flac_dec_write (GstFlacDec * flacdec, const FLAC__Frame * frame,
    const FLAC__int32 * const buffer[])
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *outbuf;
  guint depth = frame->header.bits_per_sample;
  guint width, gdepth;
  guint sample_rate = frame->header.sample_rate;
  guint channels = frame->header.channels;
  guint samples = frame->header.blocksize;
  guint j, i;
  GstMapInfo map;
  gboolean caps_changed;

  GST_LOG_OBJECT (flacdec, "samples in frame header: %d", samples);

  if (depth == 0) {
    if (flacdec->depth < 4 || flacdec->depth > 32) {
      GST_ERROR_OBJECT (flacdec, "unsupported depth %d from STREAMINFO",
          flacdec->depth);
      ret = GST_FLOW_ERROR;
      goto done;
    }

    depth = flacdec->depth;
  }

  switch (depth) {
    case 8:
      gdepth = width = 8;
      break;
    case 12:
    case 16:
      gdepth = width = 16;
      break;
    case 20:
    case 24:
      gdepth = 24;
      width = 32;
      break;
    case 32:
      gdepth = width = 32;
      break;
    default:
      GST_ERROR_OBJECT (flacdec, "unsupported depth %d", depth);
      ret = GST_FLOW_ERROR;
      goto done;
  }

  if (sample_rate == 0) {
    if (flacdec->info.rate != 0) {
      sample_rate = flacdec->info.rate;
    } else {
      GST_ERROR_OBJECT (flacdec, "unknown sample rate");
      ret = GST_FLOW_ERROR;
      goto done;
    }
  }

  caps_changed = (sample_rate != GST_AUDIO_INFO_RATE (&flacdec->info))
      || (width != GST_AUDIO_INFO_WIDTH (&flacdec->info))
      || (gdepth != GST_AUDIO_INFO_DEPTH (&flacdec->info))
      || (channels != GST_AUDIO_INFO_CHANNELS (&flacdec->info));

  if (caps_changed
      || !gst_pad_has_current_caps (GST_AUDIO_DECODER_SRC_PAD (flacdec))) {
    GST_DEBUG_OBJECT (flacdec, "Negotiating %d Hz @ %d channels", sample_rate,
        channels);

    gst_audio_info_set_format (&flacdec->info,
        gst_audio_format_build_integer (TRUE, G_BYTE_ORDER, width, gdepth),
        sample_rate, channels, NULL);

    memcpy (flacdec->info.position,
        channel_positions[flacdec->info.channels - 1],
        sizeof (GstAudioChannelPosition) * flacdec->info.channels);
    gst_audio_channel_positions_to_valid_order (flacdec->info.position,
        flacdec->info.channels);
    /* Note: we create the inverse reordering map here */
    gst_audio_get_channel_reorder_map (flacdec->info.channels,
        flacdec->info.position, channel_positions[flacdec->info.channels - 1],
        flacdec->channel_reorder_map);

    flacdec->depth = depth;

    gst_audio_decoder_set_output_format (GST_AUDIO_DECODER (flacdec),
        &flacdec->info);
  }

  outbuf =
      gst_buffer_new_allocate (NULL, samples * channels * (width / 8), NULL);

  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
  if (width == 8) {
    gint8 *outbuffer = (gint8 *) map.data;
    gint *reorder_map = flacdec->channel_reorder_map;

    if (gdepth != depth) {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ =
              (gint8) (buffer[reorder_map[j]][i] << (gdepth - depth));
        }
      }
    } else {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ = (gint8) buffer[reorder_map[j]][i];
        }
      }
    }
  } else if (width == 16) {
    gint16 *outbuffer = (gint16 *) map.data;
    gint *reorder_map = flacdec->channel_reorder_map;

    if (gdepth != depth) {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ =
              (gint16) (buffer[reorder_map[j]][i] << (gdepth - depth));
        }
      }
    } else {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ = (gint16) buffer[reorder_map[j]][i];
        }
      }
    }
  } else if (width == 32) {
    gint32 *outbuffer = (gint32 *) map.data;
    gint *reorder_map = flacdec->channel_reorder_map;

    if (gdepth != depth) {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ =
              (gint32) (buffer[reorder_map[j]][i] << (gdepth - depth));
        }
      }
    } else {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ = (gint32) buffer[reorder_map[j]][i];
        }
      }
    }
  } else {
    g_assert_not_reached ();
  }
  gst_buffer_unmap (outbuf, &map);

  GST_DEBUG_OBJECT (flacdec, "pushing %d samples", samples);
  if (flacdec->error_count)
    flacdec->error_count--;

  ret = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (flacdec), outbuf, 1);

  if (G_UNLIKELY (ret != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (flacdec, "finish_frame flow %s", gst_flow_get_name (ret));
  }

done:

  /* we act on the flow return value later in the handle_frame function, as we
   * don't want to mess up the internal decoder state by returning ABORT when
   * the error is in fact non-fatal (like a pad in flushing mode) and we want
   * to continue later. So just pretend everything's dandy and act later. */
  flacdec->last_flow = ret;

  return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
}
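When the coded depth and the GStreamer depth differ (for instance 20-bit FLAC carried as 24 valid bits in 32-bit words), the write function widens each sample by left-shifting it by the difference. A tiny sketch of that arithmetic on one illustrative value:

#include <glib.h>

int
main (void)
{
  guint depth = 20, gdepth = 24;
  gint32 coded = 0x7FFFF;                       /* largest positive 20-bit sample */
  gint32 widened = coded << (gdepth - depth);   /* scaled up to 24 valid bits */

  g_print ("20-bit 0x%05X -> 24-bit 0x%06X\n", coded, widened);
  return 0;
}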
Example 5
static GstCaps *
gst_dshowaudiosrc_getcaps_from_streamcaps (GstDshowAudioSrc * src, IPin * pin,
    IAMStreamConfig * streamcaps)
{
  GstCaps *caps = NULL;
  HRESULT hres = S_OK;
  int icount = 0;
  int isize = 0;
  AUDIO_STREAM_CONFIG_CAPS ascc;
  int i = 0;

  if (!streamcaps)
    return NULL;

  streamcaps->GetNumberOfCapabilities (&icount, &isize);

  if (isize != sizeof (ascc))
    return NULL;

  for (; i < icount; i++) {
    GstCapturePinMediaType *pin_mediatype = g_new0 (GstCapturePinMediaType, 1);

    pin->AddRef ();
    pin_mediatype->capture_pin = pin;

    hres = streamcaps->GetStreamCaps (i, &pin_mediatype->mediatype,
        (BYTE *) & ascc);
    if (hres == S_OK && pin_mediatype->mediatype) {
      GstCaps *mediacaps = NULL;

      if (!caps)
        caps = gst_caps_new_empty ();

      if (gst_dshow_check_mediatype (pin_mediatype->mediatype, MEDIASUBTYPE_PCM,
              FORMAT_WaveFormatEx)) {
        GstAudioFormat format = GST_AUDIO_FORMAT_UNKNOWN;
        WAVEFORMATEX *wavformat =
            (WAVEFORMATEX *) pin_mediatype->mediatype->pbFormat;

        switch (wavformat->wFormatTag) {
          case WAVE_FORMAT_PCM:
            format = gst_audio_format_build_integer (TRUE, G_BYTE_ORDER,
                wavformat->wBitsPerSample, wavformat->wBitsPerSample);
            break;
          default:
            break;
        }

        if (format != GST_AUDIO_FORMAT_UNKNOWN) {
          GstAudioInfo info;

          gst_audio_info_init (&info);
          gst_audio_info_set_format (&info, format,
              wavformat->nSamplesPerSec, wavformat->nChannels, NULL);
          mediacaps = gst_audio_info_to_caps (&info);
        }

        if (mediacaps) {
          src->pins_mediatypes =
              g_list_append (src->pins_mediatypes, pin_mediatype);
          gst_caps_append (caps, mediacaps);
        } else {
          gst_dshow_free_pin_mediatype (pin_mediatype);
        }
      } else {
        gst_dshow_free_pin_mediatype (pin_mediatype);
      }
    } else {
      gst_dshow_free_pin_mediatype (pin_mediatype);
    }
  }

  if (caps && gst_caps_is_empty (caps)) {
    gst_caps_unref (caps);
    caps = NULL;
  }

  return caps;
}
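The enumeration pattern above (one caps structure per device capability, appended into a single caps set) can be sketched without DirectShow at all; the capability triples below are invented for illustration:

#include <gst/gst.h>
#include <gst/audio/audio.h>

int
main (int argc, char **argv)
{
  /* Hypothetical capability list: (rate, channels, bits) as a device
   * enumeration might report them */
  struct { gint rate, channels, bits; } caps_list[] = {
    { 44100, 2, 16 }, { 48000, 2, 16 }, { 48000, 1, 8 }
  };
  GstCaps *caps;
  gchar *str;
  guint i;

  gst_init (&argc, &argv);
  caps = gst_caps_new_empty ();

  for (i = 0; i < G_N_ELEMENTS (caps_list); i++) {
    GstAudioInfo info;
    GstAudioFormat fmt = gst_audio_format_build_integer (TRUE, G_BYTE_ORDER,
        caps_list[i].bits, caps_list[i].bits);

    gst_audio_info_init (&info);
    gst_audio_info_set_format (&info, fmt, caps_list[i].rate,
        caps_list[i].channels, NULL);
    /* gst_caps_append() takes ownership of the appended caps */
    gst_caps_append (caps, gst_audio_info_to_caps (&info));
  }

  str = gst_caps_to_string (caps);
  g_print ("%s\n", str);
  g_free (str);
  gst_caps_unref (caps);
  return 0;
}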
Example 6
static void
gst_omx_audio_dec_loop (GstOMXAudioDec * self)
{
  GstOMXPort *port = self->dec_out_port;
  GstOMXBuffer *buf = NULL;
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstOMXAcquireBufferReturn acq_return;
  OMX_ERRORTYPE err;
  GstOMXAudioDecClass *klass = GST_OMX_AUDIO_DEC_GET_CLASS (self);

  acq_return = gst_omx_port_acquire_buffer (port, &buf);
  if (acq_return == GST_OMX_ACQUIRE_BUFFER_ERROR) {
    goto component_error;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_FLUSHING) {
    goto flushing;
  } else if (acq_return == GST_OMX_ACQUIRE_BUFFER_EOS) {
    goto eos;
  }

  if (!gst_pad_has_current_caps (GST_AUDIO_DECODER_SRC_PAD (self)) ||
      acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
    OMX_PARAM_PORTDEFINITIONTYPE port_def;
    OMX_AUDIO_PARAM_PCMMODETYPE pcm_param;
    GstAudioChannelPosition omx_position[OMX_AUDIO_MAXCHANNELS];
    gint i;

    GST_DEBUG_OBJECT (self, "Port settings have changed, updating caps");

    /* Reallocate all buffers */
    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE
        && gst_omx_port_is_enabled (port)) {
      err = gst_omx_port_set_enabled (port, FALSE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_buffers_released (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_deallocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 1 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    /* Just update caps */
    GST_AUDIO_DECODER_STREAM_LOCK (self);

    gst_omx_port_get_port_definition (port, &port_def);
    g_assert (port_def.format.audio.eEncoding == OMX_AUDIO_CodingPCM);

    GST_OMX_INIT_STRUCT (&pcm_param);
    pcm_param.nPortIndex = self->dec_out_port->index;
    err =
        gst_omx_component_get_parameter (self->dec, OMX_IndexParamAudioPcm,
        &pcm_param);
    if (err != OMX_ErrorNone) {
      GST_ERROR_OBJECT (self, "Failed to get PCM parameters: %s (0x%08x)",
          gst_omx_error_to_string (err), err);
      goto caps_failed;
    }

    g_assert (pcm_param.ePCMMode == OMX_AUDIO_PCMModeLinear);
    g_assert (pcm_param.bInterleaved == OMX_TRUE);

    gst_audio_info_init (&self->info);

    for (i = 0; i < pcm_param.nChannels; i++) {
      switch (pcm_param.eChannelMapping[i]) {
        case OMX_AUDIO_ChannelLF:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT;
          break;
        case OMX_AUDIO_ChannelRF:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT;
          break;
        case OMX_AUDIO_ChannelCF:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER;
          break;
        case OMX_AUDIO_ChannelLS:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT;
          break;
        case OMX_AUDIO_ChannelRS:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT;
          break;
        case OMX_AUDIO_ChannelLFE:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_LFE1;
          break;
        case OMX_AUDIO_ChannelCS:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_REAR_CENTER;
          break;
        case OMX_AUDIO_ChannelLR:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_REAR_LEFT;
          break;
        case OMX_AUDIO_ChannelRR:
          omx_position[i] = GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT;
          break;
        case OMX_AUDIO_ChannelNone:
        default:
          /* This also breaks the outer loop, since i will equal
           * pcm_param.nChannels afterwards */
          for (i = 0; i < pcm_param.nChannels; i++)
            omx_position[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
          break;
      }
    }
    if (pcm_param.nChannels == 1
        && omx_position[0] == GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER)
      omx_position[0] = GST_AUDIO_CHANNEL_POSITION_MONO;

    if (omx_position[0] == GST_AUDIO_CHANNEL_POSITION_NONE
        && klass->get_channel_positions) {
      GST_WARNING_OBJECT (self,
          "Failed to get a valid channel layout, trying fallback");
      klass->get_channel_positions (self, self->dec_out_port, omx_position);
    }

    memcpy (self->position, omx_position, sizeof (omx_position));
    gst_audio_channel_positions_to_valid_order (self->position,
        pcm_param.nChannels);
    self->needs_reorder =
        (memcmp (self->position, omx_position,
            sizeof (GstAudioChannelPosition) * pcm_param.nChannels) != 0);
    if (self->needs_reorder)
      gst_audio_get_channel_reorder_map (pcm_param.nChannels, self->position,
          omx_position, self->reorder_map);

    gst_audio_info_set_format (&self->info,
        gst_audio_format_build_integer (pcm_param.eNumData ==
            OMX_NumericalDataSigned,
            pcm_param.eEndian ==
            OMX_EndianLittle ? G_LITTLE_ENDIAN : G_BIG_ENDIAN,
            pcm_param.nBitPerSample, pcm_param.nBitPerSample),
        pcm_param.nSamplingRate, pcm_param.nChannels, self->position);

    GST_DEBUG_OBJECT (self,
        "Setting output state: format %s, rate %u, channels %u",
        gst_audio_format_to_string (self->info.finfo->format),
        (guint) pcm_param.nSamplingRate, (guint) pcm_param.nChannels);

    if (!gst_audio_decoder_set_output_format (GST_AUDIO_DECODER (self),
            &self->info)
        || !gst_audio_decoder_negotiate (GST_AUDIO_DECODER (self))) {
      if (buf)
        gst_omx_port_release_buffer (port, buf);
      goto caps_failed;
    }

    GST_AUDIO_DECODER_STREAM_UNLOCK (self);

    if (acq_return == GST_OMX_ACQUIRE_BUFFER_RECONFIGURE) {
      err = gst_omx_port_set_enabled (port, TRUE);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_allocate_buffers (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_wait_enabled (port, 5 * GST_SECOND);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_populate (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;

      err = gst_omx_port_mark_reconfigured (port);
      if (err != OMX_ErrorNone)
        goto reconfigure_error;
    }

    /* Now get a buffer */
    if (acq_return != GST_OMX_ACQUIRE_BUFFER_OK) {
      return;
    }
  }

  g_assert (acq_return == GST_OMX_ACQUIRE_BUFFER_OK);
  if (!buf) {
    g_assert ((klass->cdata.hacks & GST_OMX_HACK_NO_EMPTY_EOS_BUFFER));
    GST_AUDIO_DECODER_STREAM_LOCK (self);
    goto eos;
  }

  /* This prevents a deadlock between the srcpad stream
   * lock and the audiocodec stream lock, if ::reset()
   * is called at the wrong time
   */
  if (gst_omx_port_is_flushing (port)) {
    GST_DEBUG_OBJECT (self, "Flushing");
    gst_omx_port_release_buffer (port, buf);
    goto flushing;
  }

  GST_DEBUG_OBJECT (self, "Handling buffer: 0x%08x %" G_GUINT64_FORMAT,
      (guint) buf->omx_buf->nFlags, (guint64) buf->omx_buf->nTimeStamp);

  GST_AUDIO_DECODER_STREAM_LOCK (self);

  if (buf->omx_buf->nFilledLen > 0) {
    GstBuffer *outbuf;
    gint nframes, spf;
    GstMapInfo minfo;

    GST_DEBUG_OBJECT (self, "Handling output data");

    if (buf->omx_buf->nFilledLen % self->info.bpf != 0) {
      gst_omx_port_release_buffer (port, buf);
      goto invalid_buffer;
    }

    outbuf =
        gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (self),
        buf->omx_buf->nFilledLen);

    gst_buffer_map (outbuf, &minfo, GST_MAP_WRITE);
    if (self->needs_reorder) {
      gint i, n_samples, c, n_channels;
      gint *reorder_map = self->reorder_map;
      gint16 *dest, *source;

      dest = (gint16 *) minfo.data;
      source = (gint16 *) (buf->omx_buf->pBuffer + buf->omx_buf->nOffset);
      n_samples = buf->omx_buf->nFilledLen / self->info.bpf;
      n_channels = self->info.channels;

      for (i = 0; i < n_samples; i++) {
        for (c = 0; c < n_channels; c++) {
          dest[i * n_channels + reorder_map[c]] = source[i * n_channels + c];
        }
      }
    } else {
      memcpy (minfo.data, buf->omx_buf->pBuffer + buf->omx_buf->nOffset,
          buf->omx_buf->nFilledLen);
    }
    gst_buffer_unmap (outbuf, &minfo);

    nframes = 1;
    spf = klass->get_samples_per_frame (self, self->dec_out_port);
    if (spf != -1) {
      nframes = buf->omx_buf->nFilledLen / self->info.bpf;
      if (nframes % spf != 0)
        GST_WARNING_OBJECT (self, "Output buffer does not contain an integer "
            "number of input frames (frames: %d, spf: %d)", nframes, spf);
      nframes = (nframes + spf - 1) / spf;
    }

    GST_BUFFER_TIMESTAMP (outbuf) =
        gst_util_uint64_scale (buf->omx_buf->nTimeStamp, GST_SECOND,
        OMX_TICKS_PER_SECOND);
    if (buf->omx_buf->nTickCount != 0)
      GST_BUFFER_DURATION (outbuf) =
          gst_util_uint64_scale (buf->omx_buf->nTickCount, GST_SECOND,
          OMX_TICKS_PER_SECOND);

    flow_ret =
        gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (self), outbuf,
        nframes);
  }

  GST_DEBUG_OBJECT (self, "Read frame from component");

  GST_DEBUG_OBJECT (self, "Finished frame: %s", gst_flow_get_name (flow_ret));

  if (buf) {
    err = gst_omx_port_release_buffer (port, buf);
    if (err != OMX_ErrorNone)
      goto release_error;
  }

  self->downstream_flow_ret = flow_ret;

  if (flow_ret != GST_FLOW_OK)
    goto flow_error;

  GST_AUDIO_DECODER_STREAM_UNLOCK (self);

  return;

component_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, (NULL),
        ("OpenMAX component in error state %s (0x%08x)",
            gst_omx_component_get_last_error_string (self->dec),
            gst_omx_component_get_last_error (self->dec)));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    return;
  }

flushing:
  {
    GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_FLUSHING;
    self->started = FALSE;
    return;
  }

eos:
  {
    g_mutex_lock (&self->drain_lock);
    if (self->draining) {
      GST_DEBUG_OBJECT (self, "Drained");
      self->draining = FALSE;
      g_cond_broadcast (&self->drain_cond);
      flow_ret = GST_FLOW_OK;
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    } else {
      GST_DEBUG_OBJECT (self, "Component signalled EOS");
      flow_ret = GST_FLOW_EOS;
    }
    g_mutex_unlock (&self->drain_lock);

    GST_AUDIO_DECODER_STREAM_LOCK (self);
    self->downstream_flow_ret = flow_ret;

    /* Here we fall back and pause the task for the EOS case */
    if (flow_ret != GST_FLOW_OK)
      goto flow_error;

    GST_AUDIO_DECODER_STREAM_UNLOCK (self);

    return;
  }

flow_error:
  {
    if (flow_ret == GST_FLOW_EOS) {
      GST_DEBUG_OBJECT (self, "EOS");

      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
      self->started = FALSE;
    } else if (flow_ret < GST_FLOW_EOS) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED,
          ("Internal data stream error."), ("stream stopped, reason %s",
              gst_flow_get_name (flow_ret)));

      gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self),
          gst_event_new_eos ());
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
      self->started = FALSE;
    } else if (flow_ret == GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Flushing -- stopping task");
      gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
      self->started = FALSE;
    }
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

reconfigure_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Unable to reconfigure output port"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    return;
  }

invalid_buffer:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Invalid sized input buffer"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }

caps_failed:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL), ("Failed to set caps"));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED;
    self->started = FALSE;
    return;
  }
release_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL),
        ("Failed to relase output buffer to component: %s (0x%08x)",
            gst_omx_error_to_string (err), err));
    gst_pad_push_event (GST_AUDIO_DECODER_SRC_PAD (self), gst_event_new_eos ());
    gst_pad_pause_task (GST_AUDIO_DECODER_SRC_PAD (self));
    self->downstream_flow_ret = GST_FLOW_ERROR;
    self->started = FALSE;
    GST_AUDIO_DECODER_STREAM_UNLOCK (self);
    return;
  }
}
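The reorder map computed above is just a per-channel index table. The general pattern is to build a map from the decoder's channel order to GStreamer's canonical order with gst_audio_get_channel_reorder_map() and scatter each interleaved sample accordingly. A minimal sketch with an invented three-channel layout and sample values:

#include <gst/gst.h>
#include <gst/audio/audio.h>
#include <string.h>

int
main (int argc, char **argv)
{
  /* Hypothetical decoder order: centre, left, right */
  GstAudioChannelPosition dec_order[3] = {
    GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER,
    GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT,
    GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT
  };
  GstAudioChannelPosition gst_order[3];
  gint reorder_map[3];
  gint16 src[6] = { 100, 101, 102, 200, 201, 202 };     /* 2 frames x 3 channels */
  gint16 dst[6];
  gint i, c;

  gst_init (&argc, &argv);

  memcpy (gst_order, dec_order, sizeof (dec_order));
  gst_audio_channel_positions_to_valid_order (gst_order, 3);

  /* reorder_map[c] is where decoder channel c lands in the GStreamer order */
  gst_audio_get_channel_reorder_map (3, dec_order, gst_order, reorder_map);

  for (i = 0; i < 2; i++)
    for (c = 0; c < 3; c++)
      dst[i * 3 + reorder_map[c]] = src[i * 3 + c];

  for (i = 0; i < 6; i++)
    g_print ("%d ", dst[i]);
  g_print ("\n");

  return 0;
}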
Example 7
static GstFlowReturn gst_imx_audio_uniaudio_dec_handle_frame(GstAudioDecoder *dec, GstBuffer *buffer)
{
	GstMapInfo in_map;
	GstBuffer *out_buffer;
	gsize avail_out_size;
	GstImxAudioUniaudioDec *imx_audio_uniaudio_dec = GST_IMX_AUDIO_UNIAUDIO_DEC(dec);
	int32 dec_ret;
	uint32 offset = 0;
	uint8 *in_buf = NULL;
	uint32 in_size = 0;
	gboolean dec_loop = TRUE, flow_error = FALSE;

	/* With some formats such as Vorbis, the first few buffers are actually redundant,
	 * since they contain codec data that was already specified in codec_data or
	 * streamheader caps earlier. If this is the case, skip these buffers. */
	if (imx_audio_uniaudio_dec->skip_header_counter < imx_audio_uniaudio_dec->num_vorbis_headers)
	{
		GST_TRACE_OBJECT(dec, "skipping header buffer #%u", imx_audio_uniaudio_dec->skip_header_counter);
		++imx_audio_uniaudio_dec->skip_header_counter;
		return gst_audio_decoder_finish_frame(dec, NULL, 1);
	}

	if (buffer != NULL)
	{
		gst_buffer_map(buffer, &in_map, GST_MAP_READ);
		in_buf = in_map.data;
		in_size = in_map.size;
	}

	while (dec_loop)
	{
		GstBuffer *tmp_buf;
		uint8 *out_buf = NULL;
		uint32 out_size = 0;

		if (buffer != NULL)
			GST_TRACE_OBJECT(dec, "feeding %" G_GUINT32_FORMAT " bytes to the decoder", (guint32)in_size);
		else
			GST_TRACE_OBJECT(dec, "draining decoder");

		dec_ret = imx_audio_uniaudio_dec->codec->decode_frame(
			imx_audio_uniaudio_dec->handle,
			in_buf, in_size,
			&offset,
			&out_buf, &out_size
		);

		GST_TRACE_OBJECT(dec, "decode_frame:  return 0x%x  offset %" G_GUINT32_FORMAT "  out_size %" G_GUINT32_FORMAT, (unsigned int)dec_ret, (guint32)offset, (guint32)out_size);

		if ((out_buf != NULL) && (out_size > 0))
		{
			tmp_buf = gst_audio_decoder_allocate_output_buffer(dec, out_size);
			tmp_buf = gst_buffer_make_writable(tmp_buf);
			gst_buffer_fill(tmp_buf, 0, out_buf, out_size);
			gst_adapter_push(imx_audio_uniaudio_dec->out_adapter, tmp_buf);
		}

		if (out_buf != NULL)
		{
			gst_imx_audio_uniaudio_dec_free(out_buf);
		}

		if ((buffer != NULL) && (offset == in_map.size))
		{
			dec_loop = FALSE;
		}

		switch (dec_ret)
		{
			case ACODEC_SUCCESS:
				break;
			case ACODEC_END_OF_STREAM:
				dec_loop = FALSE;
				break;
			case ACODEC_NOT_ENOUGH_DATA:
				break;
			case ACODEC_CAPIBILITY_CHANGE:
				break;
			default:
			{
				dec_loop = FALSE;
				flow_error = TRUE;
				GST_ELEMENT_ERROR(dec, STREAM, DECODE, ("could not decode"), ("error message: %s", imx_audio_uniaudio_dec->codec->get_last_error(imx_audio_uniaudio_dec->handle)));
			}
		}
	}

	if (buffer != NULL)
		gst_buffer_unmap(buffer, &in_map);

	if (flow_error)
		return GST_FLOW_ERROR;

	if (!(imx_audio_uniaudio_dec->has_audioinfo_set))
	{
		UniACodecParameter parameter;
		GstAudioFormat pcm_fmt;
		GstAudioInfo audio_info;

		imx_audio_uniaudio_dec->codec->get_parameter(imx_audio_uniaudio_dec->handle, UNIA_OUTPUT_PCM_FORMAT, &parameter);

		if ((parameter.outputFormat.width == 0) || (parameter.outputFormat.depth == 0))
		{
			GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "no output format available yet");
			return gst_audio_decoder_finish_frame(dec, NULL, 1);
		}

		GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "output sample width: %" G_GUINT32_FORMAT "  depth: %" G_GUINT32_FORMAT, (guint32)(parameter.outputFormat.width), (guint32)(parameter.outputFormat.depth));
		pcm_fmt = gst_audio_format_build_integer(TRUE, G_BYTE_ORDER, parameter.outputFormat.width, parameter.outputFormat.depth);

		GST_DEBUG_OBJECT(imx_audio_uniaudio_dec, "setting output format to: %s  %d Hz  %d channels", gst_audio_format_to_string(pcm_fmt), (gint)(parameter.outputFormat.samplerate), (gint)(parameter.outputFormat.channels));

		gst_imx_audio_uniaudio_dec_clear_channel_positions(imx_audio_uniaudio_dec);
		gst_imx_audio_uniaudio_dec_fill_channel_positions(imx_audio_uniaudio_dec, parameter.outputFormat.layout, parameter.outputFormat.channels);

		imx_audio_uniaudio_dec->pcm_format = pcm_fmt;
		imx_audio_uniaudio_dec->num_channels = parameter.outputFormat.channels;

		gst_audio_info_set_format(
			&audio_info,
			pcm_fmt,
			parameter.outputFormat.samplerate,
			parameter.outputFormat.channels,
			imx_audio_uniaudio_dec->reordered_channel_positions
		);
		gst_audio_decoder_set_output_format(dec, &audio_info);

		imx_audio_uniaudio_dec->has_audioinfo_set = TRUE;
	}


	avail_out_size = gst_adapter_available(imx_audio_uniaudio_dec->out_adapter);

	if (avail_out_size > 0)
	{
		out_buffer = gst_adapter_take_buffer(imx_audio_uniaudio_dec->out_adapter, avail_out_size);
		if (imx_audio_uniaudio_dec->original_channel_positions != imx_audio_uniaudio_dec->reordered_channel_positions)
		{
			gst_audio_buffer_reorder_channels(
				out_buffer,
				imx_audio_uniaudio_dec->pcm_format,
				imx_audio_uniaudio_dec->num_channels,
				imx_audio_uniaudio_dec->original_channel_positions,
				imx_audio_uniaudio_dec->reordered_channel_positions
			);
		}
		return gst_audio_decoder_finish_frame(dec, out_buffer, 1);
	}
	else
	{
		return gst_audio_decoder_finish_frame(dec, NULL, 1);
	}
}
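For completeness, the same reordering can be done without a hand-written loop: gst_audio_buffer_reorder_channels(), which the uniaudio decoder above relies on, rewrites an interleaved GstBuffer in place given the source and target position arrays. A small hedged sketch (positions and sample values invented):

#include <gst/gst.h>
#include <gst/audio/audio.h>

int
main (int argc, char **argv)
{
  GstAudioChannelPosition from[2] = {
    GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT,
    GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT
  };
  GstAudioChannelPosition to[2] = {
    GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT,
    GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT
  };
  gint16 samples[4] = { 1, 2, 3, 4 };   /* two S16 frames, right-then-left */
  GstBuffer *buf;
  GstMapInfo map;

  gst_init (&argc, &argv);

  buf = gst_buffer_new_allocate (NULL, sizeof (samples), NULL);
  gst_buffer_fill (buf, 0, samples, sizeof (samples));

  /* Swap the interleaved channels in place */
  gst_audio_buffer_reorder_channels (buf, GST_AUDIO_FORMAT_S16, 2, from, to);

  gst_buffer_map (buf, &map, GST_MAP_READ);
  g_print ("%d %d %d %d\n", ((gint16 *) map.data)[0], ((gint16 *) map.data)[1],
      ((gint16 *) map.data)[2], ((gint16 *) map.data)[3]);
  gst_buffer_unmap (buf, &map);
  gst_buffer_unref (buf);

  return 0;
}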