uint32_t
DecodedAudioDataSink::PlayFromAudioQueue()
{
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsRefPtr<AudioData> audio =
    dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());

  SINK_LOG_V("playing %u frames of audio at time %lld",
             audio->mFrames, audio->mTime);
  if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) {
    mAudioStream->Write(audio->mAudioData, audio->mFrames);
  } else {
    SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
               mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels);
    PlaySilence(audio->mFrames);
  }

  StartAudioStreamPlaybackIfNeeded();

  return audio->mFrames;
}
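
The pop in Example #1 is dense: AudioQueue().PopFront() returns an already-AddRef'ed smart pointer, take() releases it as a raw MediaData*, As<AudioData>() downcasts, and dont_AddRef() re-adopts the existing reference. Below is a minimal sketch using simplified stand-ins (not Gecko's real already_AddRefed/RefPtr) showing why no reference is leaked or double-counted:

#include <cassert>

struct MediaData {
  int mRefCnt = 0;
  virtual ~MediaData() = default;
  void AddRef() { ++mRefCnt; }
  void Release() { if (--mRefCnt == 0) delete this; }
  template <typename T> T* As() { return static_cast<T*>(this); }
};

struct AudioData : MediaData {};

// Simplified stand-in for already_AddRefed<T>: a raw pointer that already
// owns one reference, waiting to be adopted.
template <typename T>
struct AlreadyAddRefed {
  T* mPtr;
  T* take() { T* p = mPtr; mPtr = nullptr; return p; }
};

// Simplified stand-in for the RefPtr + dont_AddRef() pairing: the
// constructor adopts the pointer without bumping the reference count.
template <typename T>
struct RefPtrLike {
  T* mPtr = nullptr;
  explicit RefPtrLike(T* aAdopted) : mPtr(aAdopted) {}
  ~RefPtrLike() { if (mPtr) mPtr->Release(); }
};

int main() {
  // Simulate PopFront(): the queue hands us a pointer that already holds
  // one reference on the caller's behalf.
  AlreadyAddRefed<MediaData> popped{[] {
    auto* d = new AudioData();
    d->AddRef();
    return static_cast<MediaData*>(d);
  }()};

  // dont_AddRef(PopFront().take()->As<AudioData>()): take() hands over the
  // raw pointer, As<>() downcasts, and the adopting constructor reuses the
  // queue's reference instead of adding a second one.
  RefPtrLike<AudioData> audio(popped.take()->As<AudioData>());
  assert(audio.mPtr->mRefCnt == 1);  // still exactly one reference
}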
Example #2
uint32_t
AudioSink::PlayFromAudioQueue()
{
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsRefPtr<AudioData> audio(AudioQueue().PopFront());

  SINK_LOG_V("playing %u frames of audio at time %lld",
             audio->mFrames, audio->mTime);
  if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) {
    mAudioStream->Write(audio->mAudioData, audio->mFrames);
  } else {
    SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
               mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels);
    PlaySilence(audio->mFrames);
  }

  StartAudioStreamPlaybackIfNeeded();

  if (audio->mOffset != -1) {
    mStateMachine->DispatchOnPlaybackOffsetUpdate(audio->mOffset);
  }
  return audio->mFrames;
}
Example #3
uint32_t
AudioSink::PlaySilence(uint32_t aFrames)
{
  // Maximum number of bytes we'll allocate and write at once to the audio
  // hardware when the audio stream contains missing frames and we're
  // writing silence in order to fill the gap. We limit our silence-writes
  // to 32KB in order to avoid allocating an impossibly large chunk of
  // memory if we encounter a large chunk of silence.
  const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;

  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  uint32_t maxFrames = SILENCE_BYTES_CHUNK / mInfo.mChannels / sizeof(AudioDataValue);
  uint32_t frames = std::min(aFrames, maxFrames);
  SINK_LOG_V("playing %u frames of silence", frames);
  WriteSilence(frames);
  return frames;
}
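
To make the clamping concrete: assuming stereo output and 32-bit float samples (both are assumptions here; AudioDataValue's concrete type depends on the build), the 32KB cap works out to 4096 frames per call:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;  // 32768 bytes, as above
  const uint32_t channels = 2;                     // assumed stereo
  const uint32_t bytesPerSample = sizeof(float);   // assumed f32 samples

  // 32768 / 2 / 4 = 4096 frames per silence write, at most.
  uint32_t maxFrames = SILENCE_BYTES_CHUNK / channels / bytesPerSample;

  // A 10000-frame gap is clamped; the remaining 5904 frames get written
  // by later calls, since PlaySilence() returns the count actually written.
  uint32_t frames = std::min(10000u, maxFrames);
  printf("maxFrames=%u framesThisCall=%u\n", maxFrames, frames);
}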
Example #4
uint32_t
AudioSink::PlayFromAudioQueue()
{
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsRefPtr<AudioData> audio(AudioQueue().PopFront());

  SINK_LOG_V("playing %u frames of audio at time %lld",
             audio->mFrames, audio->mTime);
  mAudioStream->Write(audio->mAudioData, audio->mFrames);

  StartAudioStreamPlaybackIfNeeded();

  if (audio->mOffset != -1) {
    mStateMachine->DispatchOnPlaybackOffsetUpdate(audio->mOffset);
  }
  return audio->mFrames;
}
Example #5
uint32_t
AudioSink::PlayFromAudioQueue()
{
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsAutoPtr<AudioData> audio(AudioQueue().PopFront());
  {
    ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
    NS_WARN_IF_FALSE(mPlaying, "Should be playing");
    // Awaken the decode loop if it's waiting for space to free up in the
    // audio queue.
    GetReentrantMonitor().NotifyAll();
  }
  SINK_LOG_V("playing %u frames of audio at time %lld",
             audio->mFrames, audio->mTime);
  mAudioStream->Write(audio->mAudioData, audio->mFrames);

  StartAudioStreamPlaybackIfNeeded();

  if (audio->mOffset != -1) {
    mStateMachine->OnPlaybackOffsetUpdate(audio->mOffset);
  }
  return audio->mFrames;
}
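
The monitor block in this older version exists solely to wake the decode loop when queue space frees up. A minimal sketch of the same handshake using standard C++ primitives in place of Gecko's ReentrantMonitor (the bounded queue and its capacity are invented for illustration):

#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>

std::mutex gMonitor;
std::condition_variable gCond;
std::queue<int> gAudioQueue;
const size_t kMaxQueued = 4;  // invented capacity

void DecodeLoopPush(int sample) {
  std::unique_lock<std::mutex> lock(gMonitor);
  // The decode loop blocks here while the audio queue is full.
  gCond.wait(lock, [] { return gAudioQueue.size() < kMaxQueued; });
  gAudioQueue.push(sample);
}

int AudioThreadPop() {
  std::unique_lock<std::mutex> lock(gMonitor);
  int sample = gAudioQueue.front();
  gAudioQueue.pop();
  // The NotifyAll() equivalent: wake the decode loop now that space has
  // freed up in the audio queue.
  gCond.notify_all();
  return sample;
}

int main() {
  for (int i = 0; i < 4; ++i) DecodeLoopPush(i);   // fill the queue
  std::thread decoder([] { DecodeLoopPush(4); });  // blocks: queue is full
  AudioThreadPop();                                // frees a slot, notifies
  decoder.join();                                  // the decoder was woken
}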
Example #6
void
AudioSink::NotifyAudioNeeded()
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn(),
             "Not called from the owner's thread");

  // Always ensure we have two processed packets pending to allow for
  // processing latency.
  while (mAudioQueue.GetSize() && (mAudioQueue.IsFinished() ||
                                    mProcessedQueueLength < LOW_AUDIO_USECS ||
                                    mProcessedQueue.GetSize() < 2)) {
    RefPtr<AudioData> data = mAudioQueue.PopFront();

    // Ignore elements with 0 frames and try the next one.
    if (!data->mFrames) {
      continue;
    }

    if (!mConverter ||
        (data->mRate != mConverter->InputConfig().Rate() ||
         data->mChannels != mConverter->InputConfig().Channels())) {
      SINK_LOG_V("Audio format changed from %u@%uHz to %u@%uHz",
                 mConverter ? mConverter->InputConfig().Channels() : 0,
                 mConverter ? mConverter->InputConfig().Rate() : 0,
                 data->mChannels, data->mRate);

      DrainConverter();

      // mFramesParsed indicates the current playtime in frames at the current
      // input sampling rate. Recalculate it per the new sampling rate.
      if (mFramesParsed) {
        // SaferMultDiv minimizes the chance of intermediate overflow.
        uint32_t oldRate = mConverter->InputConfig().Rate();
        uint32_t newRate = data->mRate;
        CheckedInt64 result = SaferMultDiv(mFramesParsed, newRate, oldRate);
        if (!result.isValid()) {
          NS_WARNING("Int overflow in AudioSink");
          mErrored = true;
          return;
        }
        mFramesParsed = result.value();
      }

      mConverter =
        MakeUnique<AudioConverter>(
          AudioConfig(data->mChannels, data->mRate),
          AudioConfig(mOutputChannels, mOutputRate));
    }

    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio, in frames.
    CheckedInt64 sampleTime =
      TimeUnitToFrames(data->mTime - mStartTime, data->mRate);
    // Calculate how many frames are missing between the frames parsed so far
    // and the start of the next packet.
    CheckedInt64 missingFrames = sampleTime - mFramesParsed;

    if (!missingFrames.isValid()) {
      NS_WARNING("Int overflow in AudioSink");
      mErrored = true;
      return;
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio packet begins some time after the end of the last packet
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio packet begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(INT32_MAX, missingFrames.value());
      mFramesParsed += missingFrames.value();

      RefPtr<AudioData> silenceData;
      AlignedAudioBuffer silenceBuffer(missingFrames.value() * data->mChannels);
      if (!silenceBuffer) {
        NS_WARNING("OOM in AudioSink");
        mErrored = true;
        return;
      }
      if (mConverter->InputConfig() != mConverter->OutputConfig()) {
        AlignedAudioBuffer convertedData =
          mConverter->Process(AudioSampleBuffer(Move(silenceBuffer))).Forget();
        silenceData = CreateAudioFromBuffer(Move(convertedData), data);
      } else {
        silenceData = CreateAudioFromBuffer(Move(silenceBuffer), data);
      }
      PushProcessedAudio(silenceData);
    }

    mLastEndTime = data->GetEndTime();
    mFramesParsed += data->mFrames;

    if (mConverter->InputConfig() != mConverter->OutputConfig()) {
      // We must ensure the buffer contains exactly mFrames * mChannels
      // samples, in case one of the audio producers over-allocated it.
      AlignedAudioBuffer buffer(Move(data->mAudioData));
      buffer.SetLength(size_t(data->mFrames) * data->mChannels);

      AlignedAudioBuffer convertedData =
        mConverter->Process(AudioSampleBuffer(Move(buffer))).Forget();
      data = CreateAudioFromBuffer(Move(convertedData), data);
    }
    if (PushProcessedAudio(data)) {
      mLastProcessedPacket = Some(data);
    }
  }

  if (mAudioQueue.IsFinished()) {
    // We have reached the end of the data, drain the resampler.
    DrainConverter();
    mProcessedQueue.Finish();
  }
}
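
The mFramesParsed rescale above is frames * newRate / oldRate in checked arithmetic. Here is a worked sketch with a hypothetical stand-in for SaferMultDiv; the real helper is structured to keep intermediate values small, whereas this stand-in simply rejects multiplications that would overflow:

#include <cstdint>
#include <cstdio>
#include <limits>

// Hypothetical stand-in for CheckedInt64 + SaferMultDiv: returns false on
// overflow instead of carrying validity in the type.
bool MultDivChecked(int64_t aFrames, uint32_t aNewRate, uint32_t aOldRate,
                    int64_t* aOut) {
  if (aFrames > std::numeric_limits<int64_t>::max() / aNewRate) {
    return false;  // the multiplication would overflow int64_t
  }
  *aOut = aFrames * int64_t(aNewRate) / int64_t(aOldRate);
  return true;
}

int main() {
  // 48000 frames of playtime at 48kHz is exactly one second; after a
  // format change to 44.1kHz the same second is 44100 frames.
  int64_t framesParsed = 48000;
  int64_t rescaled = 0;
  if (MultDivChecked(framesParsed, 44100, 48000, &rescaled)) {
    printf("mFramesParsed: %lld -> %lld\n",
           (long long)framesParsed, (long long)rescaled);  // 48000 -> 44100
  }
}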
Example #7
void
AudioSink::OnAudioPushed(const RefPtr<AudioData>& aSample)
{
  SINK_LOG_V("One new audio packet available.");
  NotifyAudioNeeded();
}
Example #8
void
AudioSink::OnAudioPopped(const RefPtr<AudioData>& aSample)
{
  SINK_LOG_V("AudioStream has used an audio packet.");
  NotifyAudioNeeded();
}
Example #9
UniquePtr<AudioStream::Chunk>
AudioSink::PopFrames(uint32_t aFrames)
{
  class Chunk : public AudioStream::Chunk {
  public:
    Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
      : mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
    Chunk() : mFrames(0), mData(nullptr) {}
    const AudioDataValue* Data() const override { return mData; }
    uint32_t Frames() const override { return mFrames; }
    uint32_t Channels() const override { return mBuffer ? mBuffer->mChannels : 0; }
    uint32_t Rate() const override { return mBuffer ? mBuffer->mRate : 0; }
    AudioDataValue* GetWritable() const override { return mData; }
  private:
    const RefPtr<AudioData> mBuffer;
    const uint32_t mFrames;
    AudioDataValue* const mData;
  };

  bool needPopping = false;
  if (!mCurrentData) {
    // No data in the queue. Return an empty chunk.
    if (!mProcessedQueue.GetSize()) {
      return MakeUnique<Chunk>();
    }

    // We need to update our values prior to popping the processed queue so
    // that the pop event doesn't fire too early (i.e. before
    // mProcessedQueueLength is updated), and so that HasUnplayedFrames
    // doesn't incorrectly return true in the interval between when
    // mProcessedQueue is read and when mWritten is updated.
    needPopping = true;
    mCurrentData = mProcessedQueue.PeekFront();
    {
      MonitorAutoLock mon(mMonitor);
      mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
                                              mCurrentData->mChannels,
                                              mCurrentData->mFrames);
    }
    MOZ_ASSERT(mCurrentData->mFrames > 0);
    mProcessedQueueLength -=
      FramesToUsecs(mCurrentData->mFrames, mOutputRate).value();
  }

  auto framesToPop = std::min(aFrames, mCursor->Available());

  SINK_LOG_V("playing audio at time=%" PRId64 " offset=%u length=%u",
             mCurrentData->mTime.ToMicroseconds(),
             mCurrentData->mFrames - mCursor->Available(), framesToPop);

  UniquePtr<AudioStream::Chunk> chunk =
    MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());

  {
    MonitorAutoLock mon(mMonitor);
    mWritten += framesToPop;
    mCursor->Advance(framesToPop);
  }

  // All frames have been popped. Reset mCurrentData so we can pop new
  // elements from the audio queue on the next call to PopFrames().
  if (!mCursor->Available()) {
    mCurrentData = nullptr;
  }

  if (needPopping) {
    // We can now safely pop the audio packet from the processed queue.
    // This will fire the popped event, triggering a call to NotifyAudioNeeded.
    RefPtr<AudioData> releaseMe = mProcessedQueue.PopFront();
    CheckIsAudible(releaseMe);
  }

  return chunk;
}
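
PopFrames() delegates its buffer bookkeeping to AudioBufferCursor. A minimal sketch of the Ptr()/Available()/Advance() contract the code above relies on, assuming interleaved samples (this is not the real class, just its observable behavior):

#include <cstdint>
#include <cstdio>

// Not the real AudioBufferCursor; just the interleaved-sample bookkeeping
// that PopFrames() depends on.
struct BufferCursor {
  float* mPtr;        // next sample to hand out
  uint32_t mChannels; // samples per frame (interleaved layout)
  uint32_t mFrames;   // frames remaining

  float* Ptr() const { return mPtr; }
  uint32_t Available() const { return mFrames; }
  void Advance(uint32_t aFrames) {
    mPtr += size_t(aFrames) * mChannels;  // skip frames * channels samples
    mFrames -= aFrames;
  }
};

int main() {
  float samples[6 * 2] = {};          // 6 stereo frames
  BufferCursor cursor{samples, 2, 6};

  // A chunk of 4 frames was popped: the cursor advances 8 samples and
  // reports 2 frames still available, matching mCursor->Advance(framesToPop).
  cursor.Advance(4);
  printf("available=%u sampleOffset=%td\n",
         cursor.Available(), cursor.Ptr() - samples);  // available=2 offset=8
}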
Example #10
UniquePtr<AudioStream::Chunk>
DecodedAudioDataSink::PopFrames(uint32_t aFrames)
{
  class Chunk : public AudioStream::Chunk {
  public:
    Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
      : mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
    Chunk() : mFrames(0), mData(nullptr) {}
    const AudioDataValue* Data() const override { return mData; }
    uint32_t Frames() const override { return mFrames; }
    uint32_t Channels() const override { return mBuffer ? mBuffer->mChannels : 0; }
    uint32_t Rate() const override { return mBuffer ? mBuffer->mRate : 0; }
    AudioDataValue* GetWritable() const override { return mData; }
  private:
    const RefPtr<AudioData> mBuffer;
    const uint32_t mFrames;
    AudioDataValue* const mData;
  };

  class SilentChunk : public AudioStream::Chunk {
  public:
    SilentChunk(uint32_t aFrames, uint32_t aChannels, uint32_t aRate)
      : mFrames(aFrames)
      , mChannels(aChannels)
      , mRate(aRate)
      , mData(MakeUnique<AudioDataValue[]>(aChannels * aFrames)) {
      memset(mData.get(), 0, aChannels * aFrames * sizeof(AudioDataValue));
    }
    const AudioDataValue* Data() const override { return mData.get(); }
    uint32_t Frames() const override { return mFrames; }
    uint32_t Channels() const override { return mChannels; }
    uint32_t Rate() const override { return mRate; }
    AudioDataValue* GetWritable() const override { return mData.get(); }
  private:
    const uint32_t mFrames;
    const uint32_t mChannels;
    const uint32_t mRate;
    UniquePtr<AudioDataValue[]> mData;
  };

  while (!mCurrentData) {
    // No data in the queue. Return an empty chunk.
    if (AudioQueue().GetSize() == 0) {
      return MakeUnique<Chunk>();
    }

    AudioData* a = AudioQueue().PeekFront()->As<AudioData>();

    // Ignore elements with 0 frames and try the next one.
    if (a->mFrames == 0) {
      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
      continue;
    }

    // Ignore invalid samples.
    if (a->mRate != mInfo.mRate || a->mChannels != mInfo.mChannels) {
      NS_WARNING(nsPrintfCString(
        "mismatched sample format, data=%p rate=%u channels=%u frames=%u",
        a->mAudioData.get(), a->mRate, a->mChannels, a->mFrames).get());
      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
      continue;
    }

    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio, in frames.
    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);
    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) +
                                static_cast<int64_t>(mWritten);
    CheckedInt64 missingFrames = sampleTime - playedFrames;

    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow in DecodedAudioDataSink");
      mErrored = true;
      return MakeUnique<Chunk>();
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      auto framesToPop = std::min<uint32_t>(missingFrames.value(), aFrames);
      mWritten += framesToPop;
      return MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels, mInfo.mRate);
    }

    mCurrentData = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
    mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
                                            mCurrentData->mChannels,
                                            mCurrentData->mFrames);
    MOZ_ASSERT(mCurrentData->mFrames > 0);
  }

  auto framesToPop = std::min(aFrames, mCursor->Available());

  SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
             mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);

  UniquePtr<AudioStream::Chunk> chunk =
    MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());

  mWritten += framesToPop;
  mCursor->Advance(framesToPop);

  // All frames have been popped. Reset mCurrentData so we can pop new
  // elements from the audio queue on the next call to PopFrames().
  if (mCursor->Available() == 0) {
    mCurrentData = nullptr;
  }

  return chunk;
}
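
To close, a worked example of the gap arithmetic shared by the last two examples. All frame counts below are invented for illustration, and AUDIO_FUZZ_FRAMES stands in for whatever small tolerance the real constant defines:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t rate = 44100;
  const int64_t AUDIO_FUZZ_FRAMES = 1;  // stand-in tolerance

  // UsecsToFrames(mTime, rate): the next packet starts at 2.0s.
  int64_t sampleTime = 2 * int64_t(rate);             // 88200 frames
  // UsecsToFrames(mStartTime, rate) + mWritten: 1.5s pushed so far.
  int64_t playedFrames = 0 + 66150;
  int64_t missingFrames = sampleTime - playedFrames;  // 22050-frame gap

  if (missingFrames > AUDIO_FUZZ_FRAMES) {
    // As in PopFrames(aFrames): hand back at most aFrames of silence now;
    // later calls keep filling the gap until the packet's start time.
    uint32_t aFrames = 1024;
    uint32_t framesToPop = uint32_t(std::min<int64_t>(missingFrames, aFrames));
    printf("silent chunk: %u frames (gap = %lld frames)\n",
           framesToPop, (long long)missingFrames);
  }
}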