Example #1
static FLAC__StreamDecoderWriteStatus
gst_flac_dec_write (GstFlacDec * flacdec, const FLAC__Frame * frame,
    const FLAC__int32 * const buffer[])
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *outbuf;
  guint depth = frame->header.bits_per_sample;
  guint width, gdepth;
  guint sample_rate = frame->header.sample_rate;
  guint channels = frame->header.channels;
  guint samples = frame->header.blocksize;
  guint j, i;
  GstMapInfo map;
  gboolean caps_changed;

  GST_LOG_OBJECT (flacdec, "samples in frame header: %d", samples);

  if (depth == 0) {
    if (flacdec->depth < 4 || flacdec->depth > 32) {
      GST_ERROR_OBJECT (flacdec, "unsupported depth %d from STREAMINFO",
          flacdec->depth);
      ret = GST_FLOW_ERROR;
      goto done;
    }

    depth = flacdec->depth;
  }

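  /* Map the FLAC bit depth to the output sample layout: 12-bit samples are
   * widened to 16 bits, 20- and 24-bit samples to 24 significant bits in a
   * 32-bit word; anything else is rejected below. */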
  switch (depth) {
    case 8:
      gdepth = width = 8;
      break;
    case 12:
    case 16:
      gdepth = width = 16;
      break;
    case 20:
    case 24:
      gdepth = 24;
      width = 32;
      break;
    case 32:
      gdepth = width = 32;
      break;
    default:
      GST_ERROR_OBJECT (flacdec, "unsupported depth %d", depth);
      ret = GST_FLOW_ERROR;
      goto done;
  }

  if (sample_rate == 0) {
    if (flacdec->info.rate != 0) {
      sample_rate = flacdec->info.rate;
    } else {
      GST_ERROR_OBJECT (flacdec, "unknown sample rate");
      ret = GST_FLOW_ERROR;
      goto done;
    }
  }

  caps_changed = (sample_rate != GST_AUDIO_INFO_RATE (&flacdec->info))
      || (width != GST_AUDIO_INFO_WIDTH (&flacdec->info))
      || (gdepth != GST_AUDIO_INFO_DEPTH (&flacdec->info))
      || (channels != GST_AUDIO_INFO_CHANNELS (&flacdec->info));

  if (caps_changed
      || !gst_pad_has_current_caps (GST_AUDIO_DECODER_SRC_PAD (flacdec))) {
    GST_DEBUG_OBJECT (flacdec, "Negotiating %d Hz @ %d channels", sample_rate,
        channels);

    gst_audio_info_set_format (&flacdec->info,
        gst_audio_format_build_integer (TRUE, G_BYTE_ORDER, width, gdepth),
        sample_rate, channels, NULL);

    memcpy (flacdec->info.position,
        channel_positions[flacdec->info.channels - 1],
        sizeof (GstAudioChannelPosition) * flacdec->info.channels);
    gst_audio_channel_positions_to_valid_order (flacdec->info.position,
        flacdec->info.channels);
    /* Note: we create the inverse reordering map here */
    gst_audio_get_channel_reorder_map (flacdec->info.channels,
        flacdec->info.position, channel_positions[flacdec->info.channels - 1],
        flacdec->channel_reorder_map);

    flacdec->depth = depth;

    gst_audio_decoder_set_output_format (GST_AUDIO_DECODER (flacdec),
        &flacdec->info);
  }

  outbuf =
      gst_buffer_new_allocate (NULL, samples * channels * (width / 8), NULL);

  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
  if (width == 8) {
    gint8 *outbuffer = (gint8 *) map.data;
    gint *reorder_map = flacdec->channel_reorder_map;

    if (gdepth != depth) {
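      /* libFLAC delivers samples right-justified at the stream bit depth;
       * shift them up to the negotiated output depth (e.g. 12-bit data
       * becomes 16-bit). */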
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ =
              (gint8) (buffer[reorder_map[j]][i] << (gdepth - depth));
        }
      }
    } else {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ = (gint8) buffer[reorder_map[j]][i];
        }
      }
    }
  } else if (width == 16) {
    gint16 *outbuffer = (gint16 *) map.data;
    gint *reorder_map = flacdec->channel_reorder_map;

    if (gdepth != depth) {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ =
              (gint16) (buffer[reorder_map[j]][i] << (gdepth - depth));
        }
      }
    } else {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ = (gint16) buffer[reorder_map[j]][i];
        }
      }
    }
  } else if (width == 32) {
    gint32 *outbuffer = (gint32 *) map.data;
    gint *reorder_map = flacdec->channel_reorder_map;

    if (gdepth != depth) {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ =
              (gint32) (buffer[reorder_map[j]][i] << (gdepth - depth));
        }
      }
    } else {
      for (i = 0; i < samples; i++) {
        for (j = 0; j < channels; j++) {
          *outbuffer++ = (gint32) buffer[reorder_map[j]][i];
        }
      }
    }
  } else {
    g_assert_not_reached ();
  }
  gst_buffer_unmap (outbuf, &map);

  GST_DEBUG_OBJECT (flacdec, "pushing %d samples", samples);
  if (flacdec->error_count)
    flacdec->error_count--;

  ret = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (flacdec), outbuf, 1);

  if (G_UNLIKELY (ret != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (flacdec, "finish_frame flow %s", gst_flow_get_name (ret));
  }

done:

  /* we act on the flow return value later in the handle_frame function, as we
   * don't want to mess up the internal decoder state by returning ABORT when
   * the error is in fact non-fatal (like a pad in flushing mode) and we want
   * to continue later. So just pretend everything's dandy and act later. */
  flacdec->last_flow = ret;

  return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
}
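
A minimal sketch (not part of the original example) of how a write routine like gst_flac_dec_write() is typically hooked into libFLAC: a thin wrapper with the FLAC__StreamDecoderWriteCallback signature recovers the element from client_data and forwards the frame. The wrapper name, the dec->decoder field, and the read/metadata/error callbacks passed to FLAC__stream_decoder_init_stream() are illustrative assumptions.

static FLAC__StreamDecoderWriteStatus
gst_flac_dec_write_cb (const FLAC__StreamDecoder * decoder,
    const FLAC__Frame * frame, const FLAC__int32 * const buffer[],
    void *client_data)
{
  /* Forward to the decoder element's write routine shown above */
  return gst_flac_dec_write ((GstFlacDec *) client_data, frame, buffer);
}

/* Hypothetical registration during setup:
 *
 *   FLAC__stream_decoder_init_stream (dec->decoder,
 *       gst_flac_dec_read_cb, NULL, NULL, NULL, NULL,
 *       gst_flac_dec_write_cb, gst_flac_dec_metadata_cb,
 *       gst_flac_dec_error_cb, dec);
 */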
Example #2
gboolean
gst_sndio_prepare (struct gstsndio *sio, GstAudioRingBufferSpec *spec)
{
  struct sio_par par, retpar;
  unsigned nchannels;

  GST_DEBUG_OBJECT (sio, "prepare");

  if (spec->type != GST_AUDIO_RING_BUFFER_FORMAT_TYPE_RAW) {
      GST_ELEMENT_ERROR (sio, RESOURCE, OPEN_READ_WRITE,
	("Only raw buffer format supported by sndio"), (NULL));
      return FALSE;
  }
  if (!GST_AUDIO_INFO_IS_INTEGER(&spec->info)) {
      GST_ELEMENT_ERROR (sio, RESOURCE, OPEN_READ_WRITE,
	("Only integer format supported"), (NULL));
      return FALSE;
  }
  if (GST_AUDIO_INFO_DEPTH(&spec->info) % 8) {
      GST_ELEMENT_ERROR (sio, RESOURCE, OPEN_READ_WRITE,
	("Only depths multiple of 8 are supported"), (NULL));
      return FALSE;
  }

  sio_initpar (&par);
  switch (GST_AUDIO_INFO_FORMAT (&spec->info)) {
  case GST_AUDIO_FORMAT_S8:
  case GST_AUDIO_FORMAT_U8:
  case GST_AUDIO_FORMAT_S16LE:
  case GST_AUDIO_FORMAT_S16BE:
  case GST_AUDIO_FORMAT_U16LE:
  case GST_AUDIO_FORMAT_U16BE:
  case GST_AUDIO_FORMAT_S32LE:
  case GST_AUDIO_FORMAT_S32BE:
  case GST_AUDIO_FORMAT_U32LE:
  case GST_AUDIO_FORMAT_U32BE:
  case GST_AUDIO_FORMAT_S24_32LE:
  case GST_AUDIO_FORMAT_S24_32BE:
  case GST_AUDIO_FORMAT_U24_32LE:
  case GST_AUDIO_FORMAT_U24_32BE:
  case GST_AUDIO_FORMAT_S24LE:
  case GST_AUDIO_FORMAT_S24BE:
  case GST_AUDIO_FORMAT_U24LE:
  case GST_AUDIO_FORMAT_U24BE:
      break;
  default:
      GST_ELEMENT_ERROR (sio, RESOURCE, OPEN_READ_WRITE,
	  ("Unsupported audio format"),
	  ("format = %d", GST_AUDIO_INFO_FORMAT (&spec->info)));
      return FALSE;
  }
  par.sig = GST_AUDIO_INFO_IS_SIGNED(&spec->info);
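  /* sndio's par.bits is the number of significant bits per sample (GStreamer's
   * depth) and par.bps the bytes per sample (GStreamer's width / 8); msb is
   * cleared below when the sample does not fill its container. */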
  par.bits = GST_AUDIO_INFO_DEPTH(&spec->info);
  par.bps = GST_AUDIO_INFO_WIDTH(&spec->info) / 8;
  if (par.bps > 1)
      par.le = GST_AUDIO_INFO_IS_LITTLE_ENDIAN(&spec->info);
  if (par.bits < par.bps * 8)
      par.msb = 0;
  par.rate = GST_AUDIO_INFO_RATE(&spec->info);
  if (sio->mode == SIO_PLAY)
      par.pchan = GST_AUDIO_INFO_CHANNELS(&spec->info);
  else
      par.rchan = GST_AUDIO_INFO_CHANNELS(&spec->info);
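  /* latency_time and buffer_time are expressed in microseconds, so
   * rate / 1e6 * time converts them to frames; e.g. 48000 Hz with a
   * 10 ms latency_time yields a 480-frame block. */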
  par.round = par.rate / 1000000. * spec->latency_time;
  par.appbufsz = par.rate / 1000000. * spec->buffer_time;

  if (!sio_setpar (sio->hdl, &par)) {
      GST_ELEMENT_ERROR (sio, RESOURCE, OPEN_WRITE,
	("Unsupported audio encoding"), (NULL));
      return FALSE;
  }
  if (!sio_getpar (sio->hdl, &retpar)) {
      GST_ELEMENT_ERROR (sio, RESOURCE, OPEN_WRITE,
	("Couldn't get audio device parameters"), (NULL));
      return FALSE;
  }
#if 0
  fprintf(stderr, "format = %s, "
         "requested: sig = %d, bits = %d, bps = %d, le = %d, msb = %d, "
	 "rate = %d, pchan = %d, round = %d, appbufsz = %d; "
	 "returned: sig = %d, bits = %d, bps = %d, le = %d, msb = %d, "
	 "rate = %d, pchan = %d, round = %d, appbufsz = %d, bufsz = %d\n",
	 GST_AUDIO_INFO_NAME(&spec->info),
	 par.sig, par.bits, par.bps, par.le, par.msb,
	 par.rate, par.pchan, par.round, par.appbufsz,
	 retpar.sig, retpar.bits, retpar.bps, retpar.le, retpar.msb,
	 retpar.rate, retpar.pchan, retpar.round, retpar.appbufsz, retpar.bufsz);
#endif
  if (par.bits != retpar.bits ||
      par.bps != retpar.bps ||
      par.rate != retpar.rate ||
      (sio->mode == SIO_PLAY && par.pchan != retpar.pchan) ||
      (sio->mode == SIO_REC && par.rchan != retpar.rchan) ||
      (par.bps > 1 && par.le != retpar.le) ||
      (par.bits < par.bps * 8 && par.msb != retpar.msb)) {
      GST_ELEMENT_ERROR (sio, RESOURCE, OPEN_WRITE,
	("Audio device refused requested parameters"), (NULL));
      return FALSE;
  }

  nchannels = (sio->mode == SIO_PLAY) ? retpar.pchan : retpar.rchan;
  spec->segsize = retpar.round * retpar.bps * nchannels;
  spec->segtotal = retpar.bufsz / retpar.round;
  sio->bpf = retpar.bps * nchannels;
  sio->delay = 0;
  sio_onmove (sio->hdl, gst_sndio_cb, sio);

  if (!sio_start (sio->hdl)) {
    GST_ELEMENT_ERROR (sio->obj, RESOURCE, OPEN_READ_WRITE,
      ("Could not start sndio"), (NULL));
    return FALSE;
  }
  return TRUE;
}

static gboolean
gst_osx_audio_ring_buffer_acquire (GstAudioRingBuffer * buf,
    GstAudioRingBufferSpec * spec)
{
  gboolean ret = FALSE, is_passthrough = FALSE;
  GstOsxAudioRingBuffer *osxbuf;
  AudioStreamBasicDescription format;

  osxbuf = GST_OSX_AUDIO_RING_BUFFER (buf);

  if (RINGBUFFER_IS_SPDIF (spec->type)) {
    format.mFormatID = kAudioFormat60958AC3;
    format.mSampleRate = (double) GST_AUDIO_INFO_RATE (&spec->info);
    format.mChannelsPerFrame = 2;
    format.mFormatFlags = kAudioFormatFlagIsSignedInteger |
        kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonMixable;
    format.mBytesPerFrame = 0;
    format.mBitsPerChannel = 16;
    format.mBytesPerPacket = 6144;
    format.mFramesPerPacket = 1536;
    format.mReserved = 0;
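    /* One IEC 61937 AC-3 burst is 1536 frames of 16-bit stereo, i.e. 6144
     * bytes, so each ring-buffer segment carries exactly one burst. */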
    spec->segsize = 6144;
    spec->segtotal = 10;
    is_passthrough = TRUE;
  } else {
    int width, depth;
    /* Fill out the audio description we're going to be using */
    format.mFormatID = kAudioFormatLinearPCM;
    format.mSampleRate = (double) GST_AUDIO_INFO_RATE (&spec->info);
    format.mChannelsPerFrame = GST_AUDIO_INFO_CHANNELS (&spec->info);
    if (GST_AUDIO_INFO_IS_FLOAT (&spec->info)) {
      format.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
      width = depth = GST_AUDIO_INFO_WIDTH (&spec->info);
    } else {
      format.mFormatFlags = kAudioFormatFlagIsSignedInteger;
      width = GST_AUDIO_INFO_WIDTH (&spec->info);
      depth = GST_AUDIO_INFO_DEPTH (&spec->info);
      if (width == depth) {
        format.mFormatFlags |= kAudioFormatFlagIsPacked;
      } else {
        format.mFormatFlags |= kAudioFormatFlagIsAlignedHigh;
      }
    }

    if (GST_AUDIO_INFO_IS_BIG_ENDIAN (&spec->info)) {
      format.mFormatFlags |= kAudioFormatFlagIsBigEndian;
    }

    format.mBytesPerFrame = GST_AUDIO_INFO_BPF (&spec->info);
    format.mBitsPerChannel = depth;
    format.mBytesPerPacket = GST_AUDIO_INFO_BPF (&spec->info);
    format.mFramesPerPacket = 1;
    format.mReserved = 0;
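    /* latency_time is in microseconds: segsize is the number of frames per
     * latency period times bytes per frame, and segtotal is how many such
     * segments fit into buffer_time. */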
    spec->segsize =
        (spec->latency_time * GST_AUDIO_INFO_RATE (&spec->info) /
        G_USEC_PER_SEC) * GST_AUDIO_INFO_BPF (&spec->info);
    spec->segtotal = spec->buffer_time / spec->latency_time;
    is_passthrough = FALSE;
  }

  GST_DEBUG_OBJECT (osxbuf, "Format: " CORE_AUDIO_FORMAT,
      CORE_AUDIO_FORMAT_ARGS (format));

  /* gst_audio_ring_buffer_set_channel_positions is not called
   * since the AUs perform channel reordering themselves.
   * (see gst_core_audio_set_channel_layout) */

  buf->size = spec->segtotal * spec->segsize;
  buf->memory = g_malloc0 (buf->size);

  ret = gst_core_audio_initialize (osxbuf->core_audio, format, spec->caps,
      is_passthrough);

  if (!ret) {
    g_free (buf->memory);
    buf->memory = NULL;
    buf->size = 0;
  }

  osxbuf->segoffset = 0;

  return ret;
}