Example #1
nsresult
VorbisTrackEncoder::Init(int aChannels, int aSamplingRate)
{
  NS_ENSURE_TRUE(aChannels > 0, NS_ERROR_INVALID_ARG);
  NS_ENSURE_TRUE(aChannels <= 8, NS_ERROR_INVALID_ARG);
  NS_ENSURE_TRUE(aSamplingRate >= 8000, NS_ERROR_INVALID_ARG);
  NS_ENSURE_TRUE(aSamplingRate <= 192000, NS_ERROR_INVALID_ARG);

  // This monitor is used to wake up other methods that are waiting for the
  // encoder to be completely initialized.
  ReentrantMonitorAutoEnter mon(mReentrantMonitor);
  mChannels = aChannels;
  mSamplingRate = aSamplingRate;

  int ret = 0;
  vorbis_info_init(&mVorbisInfo);
  double quality = mAudioBitrate ? (double)mAudioBitrate/aSamplingRate :
                   BASE_QUALITY;

  VORBISLOG("quality %f", quality);
  ret = vorbis_encode_init_vbr(&mVorbisInfo, mChannels, mSamplingRate,
                               quality);

  mInitialized = (ret == 0);

  if (mInitialized) {
    // Set up the analysis state and auxiliary encoding storage
    vorbis_analysis_init(&mVorbisDsp, &mVorbisInfo);
    vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  }

  mon.NotifyAll();

  return ret == 0 ? NS_OK : NS_ERROR_FAILURE;
}
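For context, a minimal caller sketch (assumed, not taken from the Mozilla tree; the helper name and parameter choice are illustrative):

// Hypothetical helper: initialize the encoder for stereo audio at 44100 Hz.
// These values pass the range checks above; Init returns NS_ERROR_INVALID_ARG
// for out-of-range parameters and NS_ERROR_FAILURE if vorbis_encode_init_vbr
// rejects the configuration.
nsresult
InitStereoEncoder(VorbisTrackEncoder& aEncoder)
{
  return aEncoder.Init(2, 44100);
}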
Example #2
nsresult
VorbisTrackEncoder::Init(int aChannels, int aSamplingRate)
{
  if (aChannels <= 0 || aChannels > 8) {
    VORBISLOG("aChannels <= 0 || aChannels > 8");
    return NS_ERROR_INVALID_ARG;
  }

  // This monitor is used to wake up other methods that are waiting for the
  // encoder to be completely initialized.
  ReentrantMonitorAutoEnter mon(mReentrantMonitor);
  mChannels = aChannels;
  mSamplingRate = aSamplingRate;

  int ret = 0;
  vorbis_info_init(&mVorbisInfo);

  ret = vorbis_encode_init_vbr(&mVorbisInfo, mChannels, mSamplingRate,
                               BASE_QUALITY);

  mInitialized = (ret == 0);

  if (mInitialized) {
    // Set up the analysis state and auxiliary encoding storage
    vorbis_analysis_init(&mVorbisDsp, &mVorbisInfo);
    vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  }

  mon.NotifyAll();

  return ret == 0 ? NS_OK : NS_ERROR_FAILURE;
}
Example #3
void
VorbisTrackEncoder::GetEncodedFrames(EncodedFrameContainer& aData)
{
  // vorbis does some data preanalysis, then divvies up blocks for
  // more involved (potentially parallel) processing. Get a single
  // block for encoding now.
  while (vorbis_analysis_blockout(&mVorbisDsp, &mVorbisBlock) == 1) {
    ogg_packet oggPacket;
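    // Encode this block; with a non-null ogg_packet and no bitrate management
    // in use, vorbis_analysis returns the finished packet directly in
    // oggPacket.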
    if (vorbis_analysis(&mVorbisBlock, &oggPacket) == 0) {
      VORBISLOG("vorbis_analysis_blockout block size %d", oggPacket.bytes);
      EncodedFrame* audiodata = new EncodedFrame();
      audiodata->SetFrameType(EncodedFrame::AUDIO_FRAME);
      nsTArray<uint8_t> frameData;
      frameData.AppendElements(oggPacket.packet, oggPacket.bytes);
      audiodata->SetFrameData(&frameData);
      aData.AppendEncodedFrame(audiodata);
    }
  }
}
Example #4
nsresult
VorbisTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  if (mEosSetInEncoder) {
    return NS_OK;
  }

  PROFILER_LABEL("VorbisTrackEncoder", "GetEncodedTrack",
    js::ProfileEntry::Category::OTHER);

  nsAutoPtr<AudioSegment> sourceSegment(new AudioSegment());
  {
    // Move all the samples from mRawSegment to sourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait until enough raw data has been queued, unless the stream has ended
    // or encoding has been canceled.
    while (!mCanceled && mRawSegment.GetDuration() < GetPacketDuration() &&
           !mEndOfStream) {
      mon.Wait();
    }
    VORBISLOG("GetEncodedTrack passes wait, duration is %lld\n",
      mRawSegment.GetDuration());
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    sourceSegment->AppendFrom(&mRawSegment);
  }

  if (mEndOfStream && (sourceSegment->GetDuration() == 0)
      && !mEosSetInEncoder) {
    mEncodingComplete = true;
    mEosSetInEncoder = true;
    VORBISLOG("[Vorbis] Done encoding.");
    vorbis_analysis_wrote(&mVorbisDsp, 0);
    GetEncodedFrames(aData);

    return NS_OK;
  }

  // Start encoding data.
  AudioSegment::ChunkIterator iter(*sourceSegment);

  // vorbis_analysis_buffer exposes libvorbis's internal float buffers, one per
  // channel, sized for the requested number of frames; the PCM below is copied
  // into them and then committed with vorbis_analysis_wrote.
  AudioDataValue **vorbisBuffer =
    vorbis_analysis_buffer(&mVorbisDsp, (int)sourceSegment->GetDuration());

  int framesCopied = 0;
  AutoTArray<AudioDataValue, 9600> interleavedPcm;
  AutoTArray<AudioDataValue, 9600> nonInterleavedPcm;
  interleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
  nonInterleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
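  // interleavedPcm first gathers the chunks contiguously; nonInterleavedPcm
  // then receives the planar (per-channel) layout that libvorbis expects.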
  while (!iter.IsEnded()) {
    AudioChunk chunk = *iter;
    int frameToCopy = chunk.GetDuration();
    if (!chunk.IsNull()) {
      InterleaveTrackData(chunk, frameToCopy, mChannels,
                          interleavedPcm.Elements() + framesCopied * mChannels);
    } else { // empty data
      memset(interleavedPcm.Elements() + framesCopied * mChannels, 0,
             frameToCopy * mChannels * sizeof(AudioDataValue));
    }
    framesCopied += frameToCopy;
    iter.Next();
  }
  // De-interleave the interleavedPcm.
  DeInterleaveTrackData(interleavedPcm.Elements(), framesCopied, mChannels,
                        nonInterleavedPcm.Elements());
  // Copy the nonInterleavedPcm to vorbis buffer.
  for (uint8_t i = 0; i < mChannels; ++i) {
    memcpy(vorbisBuffer[i], nonInterleavedPcm.Elements() + framesCopied * i,
           framesCopied * sizeof(AudioDataValue));
  }

  // Now vorbisBuffer contains all the data in non-interleaved form.
  // Tell the library how much we actually submitted.
  vorbis_analysis_wrote(&mVorbisDsp, framesCopied);
  VORBISLOG("vorbis_analysis_wrote framesCopied %d\n", framesCopied);
  GetEncodedFrames(aData);

  return NS_OK;
}
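A hedged sketch of a caller pulling encoded data; the helper name and error handling are assumptions, not code from the Mozilla tree:

// Hypothetical pull step: GetEncodedTrack blocks until enough raw audio has
// been queued, the stream ends, or encoding is canceled, then appends any
// finished packets to aOutput.
nsresult
PullEncodedAudio(VorbisTrackEncoder& aEncoder, EncodedFrameContainer& aOutput)
{
  nsresult rv = aEncoder.GetEncodedTrack(aOutput);
  NS_ENSURE_SUCCESS(rv, rv);
  return NS_OK;
}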