Example No. 1
void
AudioSink::AudioLoop()
{
  AssertOnAudioThread();
  SINK_LOG("AudioLoop started");

  if (NS_FAILED(InitializeAudioStream())) {
    NS_WARNING("Initializing AudioStream failed.");
    mStateMachine->DispatchOnAudioSinkError();
    return;
  }

  while (1) {
    {
      ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
      WaitForAudioToPlay();
      if (!IsPlaybackContinuing()) {
        break;
      }
    }
    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio, in frames.
    NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);

    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) + mWritten;

    CheckedInt64 missingFrames = sampleTime - playedFrames;
    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow adding in AudioLoop");
      break;
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()));
    } else {
      mWritten += PlayFromAudioQueue();
    }
    int64_t endTime = GetEndTime();
    if (endTime != -1) {
      mOnAudioEndTimeUpdateTask->Dispatch(endTime);
    }
  }
  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
  MOZ_ASSERT(mStopAudioThread || AudioQueue().AtEndOfStream());
  if (!mStopAudioThread && mPlaying) {
    Drain();
  }
  SINK_LOG("AudioLoop complete");
  Cleanup();
  SINK_LOG("AudioLoop exit");
}
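A worked pass through the gap arithmetic above, with hypothetical numbers (a standalone sketch, not AudioSink code): at 44100 Hz a queued chunk stamped 2,000,000 µs converts to 88,200 frames; if only 88,100 frames have been written since a start time of 0, the loop pushes 100 frames of silence so the chunk starts at exactly 2.0 s of stream time.

// Minimal standalone sketch of the missing-frames computation above.
// All numbers are hypothetical; overflow checking (CheckedInt64) is omitted.
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t rate = 44100;               // stream sample rate (Hz)
  const int64_t nextChunkTimeUs = 2000000;  // mTime of the next queued chunk
  const int64_t startTimeUs = 0;            // mStartTime
  const int64_t written = 88100;            // mWritten: frames already pushed

  int64_t sampleTime = nextChunkTimeUs * rate / 1000000;          // 88200
  int64_t playedFrames = startTimeUs * rate / 1000000 + written;  // 88100
  int64_t missingFrames = sampleTime - playedFrames;              // 100
  printf("push %lld frames of silence\n", (long long)missingFrames);
  return 0;
}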
Example No. 2
bool
DecodedAudioDataSink::PlayAudio()
{
  // See if there's a gap in the audio. If there is, push silence into the
  // audio hardware so we can play across the gap.
  // Calculate the timestamp of the next chunk of audio, in frames.
  NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
  CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);

  // Calculate the number of frames that have been pushed onto the audio hardware.
  CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) +
                              static_cast<int64_t>(mWritten);

  CheckedInt64 missingFrames = sampleTime - playedFrames;
  if (!missingFrames.isValid() || !sampleTime.isValid()) {
    NS_WARNING("Int overflow adding in AudioLoop");
    return false;
  }

  if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
    // The next audio chunk begins some time after the end of the last chunk
    // we pushed to the audio hardware. We must push silence into the audio
    // hardware so that the next audio chunk begins playback at the correct
    // time.
    missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
    mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()));
  } else {
    mWritten += PlayFromAudioQueue();
  }

  return true;
}
Example No. 3
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput,
                uint32_t aRate, double aVolume)
{
  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences.
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // ignore packets that we've already processed
      frameOffset.value() + audio->mFrames <= audioWrittenOffset.value()) {
    return;
  }

  if (audioWrittenOffset.value() < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  MOZ_ASSERT(audioWrittenOffset.value() >= frameOffset.value());

  int64_t offset = audioWrittenOffset.value() - frameOffset.value();
  size_t framesToWrite = audio->mFrames - offset;

  audio->EnsureAudioBuffer();
  nsRefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  nsAutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames + offset);
  }
  aOutput->AppendFrames(buffer.forget(), channels, framesToWrite);
  aStream->mAudioFramesWritten += framesToWrite;
  aOutput->ApplyVolume(aVolume);

  aStream->mNextAudioTime = audio->GetEndTime();
}
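Note the channel pointer math above: bufferData + i * audio->mFrames + offset treats the shared buffer as planar (each channel's frames stored contiguously), so skipping offset frames just advances every channel pointer by offset samples. A runnable sketch with hypothetical sizes:

// Sketch of the planar pointer math above (hypothetical 2x4 buffer).
#include <cstdio>

int main() {
  const unsigned channels = 2, frames = 4, offset = 1;
  float buffer[8] = { 0, 1, 2, 3,        // channel 0, frames 0..3
                      10, 11, 12, 13 };  // channel 1, frames 0..3
  for (unsigned i = 0; i < channels; ++i) {
    const float* ch = buffer + i * frames + offset; // same math as above
    printf("channel %u now starts at %g\n", i, ch[0]); // prints 1, then 11
  }
  return 0;
}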
Example No. 4
MediaData* Create(const media::TimeUnit& aDTS,
                  const media::TimeUnit& aDuration,
                  int64_t aOffsetInStream)
{
  // Convert duration to frames. We add 1 to duration to account for
  // rounding errors, so we get a consistent tone.
  CheckedInt64 frames =
    UsecsToFrames(aDuration.ToMicroseconds() + 1, mSampleRate);
  if (!frames.isValid() ||
      !mChannelCount ||
      !mSampleRate ||
      frames.value() > (UINT32_MAX / mChannelCount)) {
    return nullptr;
  }
  AudioDataValue* samples = new AudioDataValue[frames.value() * mChannelCount];
  // Fill the sound buffer with an A4 tone.
  static const float pi = 3.14159265f;
  static const float noteHz = 440.0f;
  for (int i = 0; i < frames.value(); i++) {
    float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
    for (unsigned c = 0; c < mChannelCount; c++) {
      samples[i * mChannelCount + c] = AudioDataValue(f);
    }
    mFrameSum++;
  }
  return new AudioData(aOffsetInStream,
                       aDTS.ToMicroseconds(),
                       aDuration.ToMicroseconds(),
                       uint32_t(frames.value()),
                       samples,
                       mChannelCount,
                       mSampleRate);
}
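The tone math: sample n is sin(2π · 440 · n / mSampleRate), and mFrameSum carries the running index n across calls so the phase stays continuous from one buffer to the next. A quick standalone check, using an arbitrary 48 kHz rate:

// Sanity check of the A4 tone formula; 48000 Hz is a hypothetical rate.
#include <cmath>
#include <cstdio>

int main() {
  const float pi = 3.14159265f;
  const float noteHz = 440.0f;
  const float rate = 48000.0f;
  // One full cycle spans rate / noteHz ~= 109.09 frames at 48 kHz, so the
  // sample ~109 frames in should land near a zero crossing again.
  printf("n=0:   %f\n", std::sin(2 * pi * noteHz * 0.0f / rate));   // 0.000000
  printf("n=109: %f\n", std::sin(2 * pi * noteHz * 109.0f / rate)); // ~-0.005
  return 0;
}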
Example No. 5
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput, uint32_t aRate,
                const PrincipalHandle& aPrincipalHandle)
{
  // The number of audio frames used to fuzz rounding errors.
  static const int64_t AUDIO_FUZZ_FRAMES = 1;

  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences.
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // ignore packets that we've already processed
      audio->GetEndTime() <= aStream->mNextAudioTime) {
    return;
  }

  if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  // Always write the whole sample without truncation to be consistent with
  // DecodedAudioDataSink::PlayFromAudioQueue()
  audio->EnsureAudioBuffer();
  RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  AutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames);
  }
  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
  aStream->mAudioFramesWritten += audio->mFrames;

  aStream->mNextAudioTime = audio->GetEndTime();
}
Example No. 6
// Format TimeUnit as number of frames at given rate.
CheckedInt64 TimeUnitToFrames(const TimeUnit& aTime, uint32_t aRate) {
  return UsecsToFrames(aTime.ToMicroseconds(), aRate);
}
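UsecsToFrames itself never appears in these examples; a sketch of its usual shape, assuming Mozilla's CheckedInt64 (mozilla::CheckedInt<int64_t>) and the USECS_PER_S constant (1,000,000) from VideoUtils.h, so any overflow surfaces through isValid():

// Sketch: microseconds -> frame count at aRate, with checked arithmetic.
// Assumes CheckedInt64 and USECS_PER_S as defined in VideoUtils.h.
CheckedInt64
UsecsToFrames(int64_t aUsecs, uint32_t aRate)
{
  // Multiply before dividing to avoid truncating sub-second precision;
  // CheckedInt64 flags any intermediate overflow instead of wrapping.
  return (CheckedInt64(aUsecs) * aRate) / USECS_PER_S;
}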
Example No. 7
void
AudioCallbackAdapter::Decoded(const nsTArray<int16_t>& aPCM, uint64_t aTimeStamp, uint32_t aChannels, uint32_t aRate)
{
  MOZ_ASSERT(IsOnGMPThread());

  if (aRate == 0 || aChannels == 0) {
    NS_WARNING("Invalid rate or num channels returned on GMP audio samples");
    mCallback->Error();
    return;
  }

  size_t numFrames = aPCM.Length() / aChannels;
  MOZ_ASSERT((aPCM.Length() % aChannels) == 0);
  nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[aPCM.Length()]);

  for (size_t i = 0; i < aPCM.Length(); ++i) {
    audioData[i] = AudioSampleToFloat(aPCM[i]);
  }

  if (mMustRecaptureAudioPosition) {
    mAudioFrameSum = 0;
    auto timestamp = UsecsToFrames(aTimeStamp, aRate);
    if (!timestamp.isValid()) {
      NS_WARNING("Invalid timestamp");
      mCallback->Error();
      return;
    }
    mAudioFrameOffset = timestamp.value();
    MOZ_ASSERT(mAudioFrameOffset >= 0);
    mMustRecaptureAudioPosition = false;
  }

  auto timestamp = FramesToUsecs(mAudioFrameOffset + mAudioFrameSum, aRate);
  if (!timestamp.isValid()) {
    NS_WARNING("Invalid timestamp on audio samples");
    mCallback->Error();
    return;
  }
  mAudioFrameSum += numFrames;

  auto duration = FramesToUsecs(numFrames, aRate);
  if (!duration.isValid()) {
    NS_WARNING("Invalid duration on audio samples");
    mCallback->Error();
    return;
  }

  nsRefPtr<AudioData> audio(new AudioData(mLastStreamOffset,
                                          timestamp.value(),
                                          duration.value(),
                                          numFrames,
                                          audioData.forget(),
                                          aChannels,
                                          aRate));

#ifdef LOG_SAMPLE_DECODE
  LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
      timestamp, duration, currentLength);
#endif

  mCallback->Output(audio);
}
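AudioSampleToFloat above is the standard int16-to-float normalization; a sketch of it, assuming the usual divide-by-2^15 scaling from Mozilla's AudioSampleFormat.h:

// Sketch of the int16 -> float sample conversion used above: map the
// 16-bit range onto [-1.0, 1.0) by dividing by 2^15.
static inline float
AudioSampleToFloat(int16_t aValue)
{
  return aValue / 32768.0f;
}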
Example No. 8
nsresult
SeekTask::DropAudioUpToSeekTarget(MediaData* aSample)
{
  AssertOwnerThread();
  RefPtr<AudioData> audio(aSample->As<AudioData>());
  MOZ_ASSERT(audio && mSeekJob.Exists() && mSeekJob.mTarget.IsAccurate());

  CheckedInt64 sampleDuration = FramesToUsecs(audio->mFrames, mAudioRate);
  if (!sampleDuration.isValid()) {
    return NS_ERROR_FAILURE;
  }

  if (audio->mTime + sampleDuration.value() <= mSeekJob.mTarget.GetTime().ToMicroseconds()) {
    // Our seek target lies after the frames in this AudioData. Don't
    // push it onto the audio queue, and keep decoding forwards.
    return NS_OK;
  }

  if (audio->mTime > mSeekJob.mTarget.GetTime().ToMicroseconds()) {
    // The seek target doesn't lie in the audio block just after the last
    // audio frames we've seen which were before the seek target. This
    // could have been the first audio data we've seen after seek, i.e. the
    // seek terminated after the seek target in the audio stream. Just
    // abort the audio decode-to-target, the state machine will play
    // silence to cover the gap. Typically this happens in poorly muxed
    // files.
    DECODER_WARN("Audio not synced after seek, maybe a poorly muxed file?");
    mSeekedAudioData = audio;
    return NS_OK;
  }

  // The seek target lies somewhere in this AudioData's frames, strip off
  // any frames which lie before the seek target, so we'll begin playback
  // exactly at the seek target.
  NS_ASSERTION(mSeekJob.mTarget.GetTime().ToMicroseconds() >= audio->mTime,
               "Target must be at or after data start.");
  NS_ASSERTION(mSeekJob.mTarget.GetTime().ToMicroseconds() < audio->mTime + sampleDuration.value(),
               "Data must end after target.");

  CheckedInt64 framesToPrune =
    UsecsToFrames(mSeekJob.mTarget.GetTime().ToMicroseconds() - audio->mTime, mAudioRate);
  if (!framesToPrune.isValid()) {
    return NS_ERROR_FAILURE;
  }
  if (framesToPrune.value() > audio->mFrames) {
    // We've messed up somehow. Don't try to trim frames; the |frames|
    // variable below would overflow.
    DECODER_WARN("Can't prune more frames than we have!");
    return NS_ERROR_FAILURE;
  }
  uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune.value());
  uint32_t channels = audio->mChannels;
  AlignedAudioBuffer audioData(frames * channels);
  if (!audioData) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  memcpy(audioData.get(),
         audio->mAudioData.get() + (framesToPrune.value() * channels),
         frames * channels * sizeof(AudioDataValue));
  CheckedInt64 duration = FramesToUsecs(frames, mAudioRate);
  if (!duration.isValid()) {
    return NS_ERROR_FAILURE;
  }
  RefPtr<AudioData> data(new AudioData(audio->mOffset,
                                       mSeekJob.mTarget.GetTime().ToMicroseconds(),
                                       duration.value(),
                                       frames,
                                       Move(audioData),
                                       channels,
                                       audio->mRate));
  MOZ_ASSERT(!mSeekedAudioData, "Should be the 1st sample after seeking");
  mSeekedAudioData = data;

  return NS_OK;
}
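The pruning arithmetic with hypothetical numbers: at 44100 Hz, a seek target 500 µs past the sample's start time converts to 22 whole frames to drop, so the memcpy skips framesToPrune * channels values at the front of the buffer and keeps the rest. A standalone sketch:

// Worked pruning arithmetic; all numbers are hypothetical.
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t rate = 44100;
  const int64_t audioTimeUs = 2000000;  // audio->mTime
  const int64_t targetUs = 2000500;     // seek target, 500 us later
  const uint32_t totalFrames = 1024;    // audio->mFrames
  const uint32_t channels = 2;          // audio->mChannels

  int64_t framesToPrune = (targetUs - audioTimeUs) * rate / 1000000;  // 22
  uint32_t kept = totalFrames - static_cast<uint32_t>(framesToPrune); // 1002
  printf("skip %lld values, keep %u frames\n",
         (long long)(framesToPrune * channels), kept);
  return 0;
}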
Example No. 9
// Format TimeUnit as number of frames at given rate.
CheckedInt64 TimeUnitToFrames(const TimeUnit& aTime, uint32_t aRate) {
  return aTime.IsValid() ? UsecsToFrames(aTime.ToMicroseconds(), aRate)
                         : CheckedInt64(INT64_MAX) + 1;
}
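The CheckedInt64(INT64_MAX) + 1 arm overflows on purpose: the checked type latches the overflow, so the caller's usual isValid() test rejects an invalid TimeUnit the same way it rejects arithmetic overflow. A minimal sketch, assuming mfbt's CheckedInt:

// Sketch: an intentionally overflowed CheckedInt64 fails isValid().
#include <cstdint>
#include "mozilla/CheckedInt.h"

typedef mozilla::CheckedInt<int64_t> CheckedInt64;

bool IsPoisoned() {
  CheckedInt64 invalid = CheckedInt64(INT64_MAX) + 1; // latches overflow
  return !invalid.isValid(); // true: callers bail out before using value()
}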
Example No. 10
int64_t AudioClock::GetPositionInFrames(int64_t aFrames) const
{
  CheckedInt64 v = UsecsToFrames(GetPosition(aFrames), mInRate);
  return v.isValid() ? v.value() : -1;
}
Example No. 11
nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
{
  DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) Begin", aTarget));

  // Decode forward to the target frame. Start with video, if we have it.
  if (HasVideo()) {
    bool eof = false;
    int64_t startTime = -1;
    nsAutoPtr<VideoData> video;
    while (HasVideo() && !eof) {
      while (VideoQueue().GetSize() == 0 && !eof) {
        bool skip = false;
        eof = !DecodeVideoFrame(skip, 0);
        {
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      if (VideoQueue().GetSize() == 0) {
        // Hit end of file, we want to display the last frame of the video.
        if (video) {
          VideoQueue().PushFront(video.forget());
        }
        break;
      }
      video = VideoQueue().PeekFront();
      // If the frame end time is less than the seek target, we won't want
      // to display this frame after the seek, so discard it.
      if (video && video->GetEndTime() <= aTarget) {
        if (startTime == -1) {
          startTime = video->mTime;
        }
        VideoQueue().PopFront();
      } else {
        video.forget();
        break;
      }
    }
    {
      ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
      if (mDecoder->IsShutdown()) {
        return NS_ERROR_FAILURE;
      }
    }
    DECODER_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld", startTime));
  }

  if (HasAudio()) {
    // Decode audio forward to the seek target.
    bool eof = false;
    while (HasAudio() && !eof) {
      while (!eof && AudioQueue().GetSize() == 0) {
        eof = !DecodeAudioData();
        {
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      const AudioData* audio = AudioQueue().PeekFront();
      if (!audio)
        break;
      CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
      CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudio.mRate);
      if (!startFrame.isValid() || !targetFrame.isValid()) {
        return NS_ERROR_FAILURE;
      }
      if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
        // Our seek target lies after the frames in this AudioData. Pop it
        // off the queue, and keep decoding forwards.
        delete AudioQueue().PopFront();
        audio = nullptr;
        continue;
      }
      if (startFrame.value() > targetFrame.value()) {
        // The seek target doesn't lie in the audio block just after the last
        // audio frames we've seen which were before the seek target. This
        // could have been the first audio data we've seen after seek, i.e. the
        // seek terminated after the seek target in the audio stream. Just
        // abort the audio decode-to-target, the state machine will play
        // silence to cover the gap. Typically this happens in poorly muxed
        // files.
        NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
        break;
      }

      // The seek target lies somewhere in this AudioData's frames, strip off
      // any frames which lie before the seek target, so we'll begin playback
      // exactly at the seek target.
      NS_ASSERTION(targetFrame.value() >= startFrame.value(),
                   "Target must be at or after data start.");
      NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
                   "Data must end after target.");

      int64_t framesToPrune = targetFrame.value() - startFrame.value();
      if (framesToPrune > audio->mFrames) {
        // We've messed up somehow. Don't try to trim frames; the |frames|
        // variable below would overflow.
        NS_WARNING("Can't prune more frames than we have!");
        break;
      }
      uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
      uint32_t channels = audio->mChannels;
      nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
      memcpy(audioData.get(),
             audio->mAudioData.get() + (framesToPrune * channels),
             frames * channels * sizeof(AudioDataValue));
      CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
      if (!duration.isValid()) {
        return NS_ERROR_FAILURE;
      }
      nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
                                              aTarget,
                                              duration.value(),
                                              frames,
                                              audioData.forget(),
                                              channels));
      delete AudioQueue().PopFront();
      AudioQueue().PushFront(data.forget());
      break;
    }
  }

  DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) End", aTarget));

  return NS_OK;
}
Example No. 12
UniquePtr<AudioStream::Chunk>
DecodedAudioDataSink::PopFrames(uint32_t aFrames)
{
  class Chunk : public AudioStream::Chunk {
  public:
    Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
      : mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
    Chunk() : mFrames(0), mData(nullptr) {}
    const AudioDataValue* Data() const { return mData; }
    uint32_t Frames() const { return mFrames; }
    uint32_t Channels() const { return mBuffer ? mBuffer->mChannels : 0; }
    uint32_t Rate() const { return mBuffer ? mBuffer->mRate : 0; }
    AudioDataValue* GetWritable() const { return mData; }
  private:
    const RefPtr<AudioData> mBuffer;
    const uint32_t mFrames;
    AudioDataValue* const mData;
  };

  class SilentChunk : public AudioStream::Chunk {
  public:
    SilentChunk(uint32_t aFrames, uint32_t aChannels, uint32_t aRate)
      : mFrames(aFrames)
      , mChannels(aChannels)
      , mRate(aRate)
      , mData(MakeUnique<AudioDataValue[]>(aChannels * aFrames)) {
      memset(mData.get(), 0, aChannels * aFrames * sizeof(AudioDataValue));
    }
    const AudioDataValue* Data() const { return mData.get(); }
    uint32_t Frames() const { return mFrames; }
    uint32_t Channels() const { return mChannels; }
    uint32_t Rate() const { return mRate; }
    AudioDataValue* GetWritable() const { return mData.get(); }
  private:
    const uint32_t mFrames;
    const uint32_t mChannels;
    const uint32_t mRate;
    UniquePtr<AudioDataValue[]> mData;
  };

  while (!mCurrentData) {
    // No data in the queue. Return an empty chunk.
    if (AudioQueue().GetSize() == 0) {
      return MakeUnique<Chunk>();
    }

    AudioData* a = AudioQueue().PeekFront()->As<AudioData>();

    // Skip any element with 0 frames and try the next.
    if (a->mFrames == 0) {
      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
      continue;
    }

    // Ignore invalid samples.
    if (a->mRate != mInfo.mRate || a->mChannels != mInfo.mChannels) {
      NS_WARNING(nsPrintfCString(
        "mismatched sample format, data=%p rate=%u channels=%u frames=%u",
        a->mAudioData.get(), a->mRate, a->mChannels, a->mFrames).get());
      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
      continue;
    }

    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio, in frames.
    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);
    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) +
                                static_cast<int64_t>(mWritten);
    CheckedInt64 missingFrames = sampleTime - playedFrames;

    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow in DecodedAudioDataSink");
      mErrored = true;
      return MakeUnique<Chunk>();
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      auto framesToPop = std::min<uint32_t>(missingFrames.value(), aFrames);
      mWritten += framesToPop;
      return MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels, mInfo.mRate);
    }

    mCurrentData = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
    mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
                                            mCurrentData->mChannels,
                                            mCurrentData->mFrames);
    MOZ_ASSERT(mCurrentData->mFrames > 0);
  }

  auto framesToPop = std::min(aFrames, mCursor->Available());

  SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
             mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);

  UniquePtr<AudioStream::Chunk> chunk =
    MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());

  mWritten += framesToPop;
  mCursor->Advance(framesToPop);

  // All frames are popped. Reset mCurrentData so we can pop new elements from
  // the audio queue in next calls to PopFrames().
  if (mCursor->Available() == 0) {
    mCurrentData = nullptr;
  }

  return chunk;
}
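For context, PopFrames() is the pull-side counterpart of the push loop in Example No. 1: the audio stream's data callback drains it one chunk at a time. A hedged sketch of such a caller follows; FillBuffer and WriteToHardware are illustrative names, not the real AudioStream code:

// Illustrative consumer: drain PopFrames() until the request is satisfied.
// FillBuffer and WriteToHardware are hypothetical names for this sketch.
void
FillBuffer(DecodedAudioDataSink* aSink, uint32_t aFramesNeeded)
{
  while (aFramesNeeded > 0) {
    UniquePtr<AudioStream::Chunk> chunk = aSink->PopFrames(aFramesNeeded);
    if (chunk->Frames() == 0) {
      break; // queue is empty; the stream underruns or drains
    }
    // WriteToHardware(chunk->Data(), chunk->Frames(), chunk->Channels());
    aFramesNeeded -= chunk->Frames();
  }
}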