Code Example #1
File: AudioSink.cpp Project: giota-cliqz/browser-f
bool
AudioSink::IsPlaybackContinuing()
{
  AssertCurrentThreadInMonitor();
  if (mPlaying && mAudioStream->IsPaused()) {
    mAudioStream->Resume();
  }

  // If we're shutting down, captured, or at EOS, break out and exit the audio
  // thread.
  if (mStopAudioThread || AudioQueue().AtEndOfStream()) {
    return false;
  }

  UpdateStreamSettings();

  return true;
}
Code Example #2
File: AudioSink.cpp Project: msliu/gecko-dev
uint32_t
AudioSink::PlayFromAudioQueue()
{
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsAutoPtr<AudioData> audio(AudioQueue().PopFront());

  SINK_LOG_V("playing %u frames of audio at time %lld",
             audio->mFrames, audio->mTime);
  mAudioStream->Write(audio->mAudioData, audio->mFrames);

  StartAudioStreamPlaybackIfNeeded();

  if (audio->mOffset != -1) {
    mStateMachine->OnPlaybackOffsetUpdate(audio->mOffset);
  }
  return audio->mFrames;
}
Code Example #3
uint32_t
DecodedAudioDataSink::PlayFromAudioQueue()
{
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsRefPtr<AudioData> audio =
    dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());

  SINK_LOG_V("playing %u frames of audio at time %lld",
             audio->mFrames, audio->mTime);
  if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) {
    mAudioStream->Write(audio->mAudioData, audio->mFrames);
  } else {
    SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
               mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels);
    PlaySilence(audio->mFrames);
  }

  StartAudioStreamPlaybackIfNeeded();

  return audio->mFrames;
}
Code Example #4
File: AudioSink.cpp Project: darchons/gecko-dev
uint32_t
AudioSink::PlayFromAudioQueue()
{
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsRefPtr<AudioData> audio(AudioQueue().PopFront());

  SINK_LOG_V("playing %u frames of audio at time %lld",
             audio->mFrames, audio->mTime);
  if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) {
    mAudioStream->Write(audio->mAudioData, audio->mFrames);
  } else {
    SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
               mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels);
    PlaySilence(audio->mFrames);
  }

  StartAudioStreamPlaybackIfNeeded();

  if (audio->mOffset != -1) {
    mStateMachine->DispatchOnPlaybackOffsetUpdate(audio->mOffset);
  }
  return audio->mFrames;
}
Code Example #5
File: AudioSink.cpp Project: andrenatal/gecko-dev
uint32_t
AudioSink::PlayFromAudioQueue()
{
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsAutoPtr<AudioData> audio(AudioQueue().PopFront());
  {
    ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
    NS_WARN_IF_FALSE(mPlaying, "Should be playing");
    // Awaken the decode loop if it's waiting for space to free up in the
    // audio queue.
    GetReentrantMonitor().NotifyAll();
  }
  SINK_LOG_V("playing %u frames of audio at time %lld",
             this, audio->mFrames, audio->mTime);
  mAudioStream->Write(audio->mAudioData, audio->mFrames);

  StartAudioStreamPlaybackIfNeeded();

  if (audio->mOffset != -1) {
    mStateMachine->OnPlaybackOffsetUpdate(audio->mOffset);
  }
  return audio->mFrames;
}
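
Code Examples #3, #4, and #9 call a PlaySilence() helper that does not appear on this page. Below is a minimal sketch of what such a helper might look like, assuming the same mAudioStream and mInfo members used above; the 32 KiB chunk cap is an illustrative assumption, not taken from this page.

// Hypothetical sketch of PlaySilence(): write up to aFrames zeroed frames so
// playback stays time-aligned across a gap (or a format mismatch, as in
// Code Examples #3 and #4). The chunk cap below is an assumption.
uint32_t
AudioSink::PlaySilence(uint32_t aFrames)
{
  AssertOnAudioThread();
  static const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;
  uint32_t maxFrames = SILENCE_BYTES_CHUNK / mInfo.mChannels / sizeof(AudioDataValue);
  uint32_t frames = std::min(aFrames, maxFrames);
  nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[mInfo.mChannels * frames]);
  memset(buf.get(), 0, mInfo.mChannels * frames * sizeof(AudioDataValue));
  mAudioStream->Write(buf.get(), frames);
  return frames;
}
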
Code Example #6
nsRefPtr<MediaDecoderReader::AudioDataPromise>
MediaDecoderReader::RequestAudioData()
{
  nsRefPtr<AudioDataPromise> p = mBaseAudioPromise.Ensure(__func__);
  while (AudioQueue().GetSize() == 0 &&
         !AudioQueue().IsFinished()) {
    if (!DecodeAudioData()) {
      AudioQueue().Finish();
      break;
    }
    // AudioQueue size is still zero, post a task to try again. Don't spin
    // waiting in this while loop since it somehow prevents audio EOS from
    // coming in gstreamer 1.x when there is still video buffer waiting to be
    // consumed. (|mVideoSinkBufferCount| > 0)
    if (AudioQueue().GetSize() == 0 && mTaskQueue) {
      RefPtr<nsIRunnable> task(new ReRequestAudioTask(this));
      mTaskQueue->Dispatch(task.forget());
      return p;
    }
  }
  if (AudioQueue().GetSize() > 0) {
    nsRefPtr<AudioData> a = AudioQueue().PopFront();
    if (mAudioDiscontinuity) {
      a->mDiscontinuity = true;
      mAudioDiscontinuity = false;
    }
    mBaseAudioPromise.Resolve(a, __func__);
  } else if (AudioQueue().IsFinished()) {
    mBaseAudioPromise.Reject(mHitAudioDecodeError ? DECODE_ERROR : END_OF_STREAM, __func__);
    mHitAudioDecodeError = false;
  } else {
    MOZ_ASSERT(false, "Dropping this promise on the floor");
  }

  return p;
}
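
For context, the returned AudioDataPromise is consumed with a Then() call. A minimal sketch, assuming MozPromise-style semantics and the resolve/reject types implied by the Resolve() and Reject() calls above; the target task queue and callback bodies are hypothetical.

// Hypothetical consumer of RequestAudioData(); the task queue and the
// callback signatures are assumptions based on the code above.
nsRefPtr<MediaDecoderReader::AudioDataPromise> promise = reader->RequestAudioData();
promise->Then(taskQueue, __func__,
              [] (AudioData* aSample) {
                // Got a decoded sample; hand it to the audio sink's queue.
              },
              [] (MediaDecoderReader::NotDecodedReason aReason) {
                // DECODE_ERROR or END_OF_STREAM, matching the Reject()s above.
              });
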
Code Example #7
File: AudioSink.cpp Project: darchons/gecko-dev
bool
AudioSink::ExpectMoreAudioData()
{
  return AudioQueue().GetSize() == 0 && !AudioQueue().IsFinished();
}
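
ExpectMoreAudioData() is the natural wait predicate for the audio thread. Here is a sketch of how the WaitForAudioToPlay() helper called in Code Example #9 (not shown on this page) might use it, assuming the sink's reentrant monitor and the mStopAudioThread flag seen elsewhere on this page.

// Hypothetical sketch: block until data arrives, the queue finishes, or the
// sink is told to stop. The producer is expected to NotifyAll() on the same
// monitor after pushing into AudioQueue().
void
AudioSink::WaitForAudioToPlay()
{
  AssertCurrentThreadInMonitor();
  while (ExpectMoreAudioData() && !mStopAudioThread) {
    GetReentrantMonitor().Wait();
  }
}
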
Code Example #8
nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
{
  DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) Begin", aTarget));

  // Decode forward to the target frame. Start with video, if we have it.
  if (HasVideo()) {
    bool eof = false;
    int64_t startTime = -1;
    nsAutoPtr<VideoData> video;
    while (HasVideo() && !eof) {
      while (VideoQueue().GetSize() == 0 && !eof) {
        bool skip = false;
        eof = !DecodeVideoFrame(skip, 0);
        {
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      if (VideoQueue().GetSize() == 0) {
        // Hit end of file, we want to display the last frame of the video.
        if (video) {
          VideoQueue().PushFront(video.forget());
        }
        break;
      }
      video = VideoQueue().PeekFront();
      // If the frame end time is less than the seek target, we won't want
      // to display this frame after the seek, so discard it.
      if (video && video->GetEndTime() <= aTarget) {
        if (startTime == -1) {
          startTime = video->mTime;
        }
        VideoQueue().PopFront();
      } else {
        video.forget();
        break;
      }
    }
    {
      ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
      if (mDecoder->IsShutdown()) {
        return NS_ERROR_FAILURE;
      }
    }
    DECODER_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld", startTime));
  }

  if (HasAudio()) {
    // Decode audio forward to the seek target.
    bool eof = false;
    while (HasAudio() && !eof) {
      while (!eof && AudioQueue().GetSize() == 0) {
        eof = !DecodeAudioData();
        {
          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
          if (mDecoder->IsShutdown()) {
            return NS_ERROR_FAILURE;
          }
        }
      }
      const AudioData* audio = AudioQueue().PeekFront();
      if (!audio)
        break;
      CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
      CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudio.mRate);
      if (!startFrame.isValid() || !targetFrame.isValid()) {
        return NS_ERROR_FAILURE;
      }
      if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
        // Our seek target lies after the frames in this AudioData. Pop it
        // off the queue, and keep decoding forwards.
        delete AudioQueue().PopFront();
        audio = nullptr;
        continue;
      }
      if (startFrame.value() > targetFrame.value()) {
        // The seek target doesn't lie in the audio block just after the last
        // audio frames we've seen which were before the seek target. This
        // could have been the first audio data we've seen after seek, i.e. the
        // seek terminated after the seek target in the audio stream. Just
        // abort the audio decode-to-target, the state machine will play
        // silence to cover the gap. Typically this happens in poorly muxed
        // files.
        NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
        break;
      }

      // The seek target lies somewhere in this AudioData's frames, strip off
      // any frames which lie before the seek target, so we'll begin playback
      // exactly at the seek target.
      NS_ASSERTION(targetFrame.value() >= startFrame.value(),
                   "Target must be at or after data start.");
      NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
                   "Data must end after target.");

      int64_t framesToPrune = targetFrame.value() - startFrame.value();
      if (framesToPrune > audio->mFrames) {
        // We've messed up somehow. Don't try to trim frames, the |frames|
        // variable below will overflow.
        NS_WARNING("Can't prune more frames that we have!");
        break;
      }
      uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
      uint32_t channels = audio->mChannels;
      nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
      memcpy(audioData.get(),
             audio->mAudioData.get() + (framesToPrune * channels),
             frames * channels * sizeof(AudioDataValue));
      CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
      if (!duration.isValid()) {
        return NS_ERROR_FAILURE;
      }
      nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
                                              aTarget,
                                              duration.value(),
                                              frames,
                                              audioData.forget(),
                                              channels));
      delete AudioQueue().PopFront();
      AudioQueue().PushFront(data.forget());
      break;
    }
  }

  DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) End", aTarget));

  return NS_OK;
}
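
To make the pruning arithmetic concrete, here is a worked example with illustrative numbers (none taken from this page):

// rate           = 44100 Hz
// aTarget        = 1'000'000 us -> targetFrame = 1'000'000 * 44100 / 10^6 = 44'100
// audio->mTime   =   900'000 us -> startFrame  =   900'000 * 44100 / 10^6 = 39'690
// audio->mFrames = 8'192        -> the chunk covers frames [39'690, 47'882)
//
// startFrame <= targetFrame < startFrame + mFrames, so the seek target falls
// inside this chunk:
//   framesToPrune = 44'100 - 39'690 = 4'410
//   frames kept   =  8'192 -  4'410 = 3'782
// A new AudioData holding only the last 3'782 frames is pushed back onto the
// queue, so playback resumes exactly at the seek target.
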
Code Example #9
File: AudioSink.cpp Project: haasn/gecko-dev
void
AudioSink::AudioLoop()
{
  AssertOnAudioThread();
  SINK_LOG("AudioLoop started");

  if (NS_FAILED(InitializeAudioStream())) {
    NS_WARNING("Initializing AudioStream failed.");
    mStateMachine->DispatchOnAudioSinkError();
    return;
  }

  while (1) {
    {
      ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
      WaitForAudioToPlay();
      if (!IsPlaybackContinuing()) {
        break;
      }
    }
    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);

    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) +
                                static_cast<int64_t>(mWritten);

    CheckedInt64 missingFrames = sampleTime - playedFrames;
    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow adding in AudioLoop");
      break;
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()));
    } else {
      mWritten += PlayFromAudioQueue();
    }
    int64_t endTime = GetEndTime();
    if (endTime != -1) {
      mOnAudioEndTimeUpdateTask->Dispatch(endTime);
    }
  }
  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
  MOZ_ASSERT(mStopAudioThread || AudioQueue().AtEndOfStream());
  if (!mStopAudioThread && mPlaying) {
    Drain();
  }
  SINK_LOG("AudioLoop complete");
  Cleanup();
  SINK_LOG("AudioLoop exit");
}
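
The gap check above reduces to simple rate arithmetic: UsecsToFrames is, modulo overflow checking, usecs * rate / 1'000'000. Worked through with illustrative numbers (not from this page):

// mInfo.mRate = 48'000 Hz, next chunk's mTime = 2'500'000 us:
//   sampleTime    = 2'500'000 * 48'000 / 1'000'000 = 120'000 frames
// mStartTime = 0 and mWritten = 118'000 frames already pushed:
//   playedFrames  = 0 + 118'000 = 118'000
//   missingFrames = 120'000 - 118'000 = 2'000 frames (~41.7 ms)
// 2'000 exceeds AUDIO_FUZZ_FRAMES, so PlaySilence() writes 2'000 zeroed
// frames before the chunk itself is played.
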
Code Example #10
bool
DecodedAudioDataSink::Ended() const
{
  // Return true when an error has been encountered so the AudioStream can
  // start draining.
  return AudioQueue().IsFinished() || mErrored;
}
Code Example #11
UniquePtr<AudioStream::Chunk>
DecodedAudioDataSink::PopFrames(uint32_t aFrames)
{
  class Chunk : public AudioStream::Chunk {
  public:
    Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
      : mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
    Chunk() : mFrames(0), mData(nullptr) {}
    const AudioDataValue* Data() const { return mData; }
    uint32_t Frames() const { return mFrames; }
    uint32_t Channels() const { return mBuffer ? mBuffer->mChannels : 0; }
    uint32_t Rate() const { return mBuffer ? mBuffer->mRate : 0; }
    AudioDataValue* GetWritable() const { return mData; }
  private:
    const RefPtr<AudioData> mBuffer;
    const uint32_t mFrames;
    AudioDataValue* const mData;
  };

  class SilentChunk : public AudioStream::Chunk {
  public:
    SilentChunk(uint32_t aFrames, uint32_t aChannels, uint32_t aRate)
      : mFrames(aFrames)
      , mChannels(aChannels)
      , mRate(aRate)
      , mData(MakeUnique<AudioDataValue[]>(aChannels * aFrames)) {
      memset(mData.get(), 0, aChannels * aFrames * sizeof(AudioDataValue));
    }
    const AudioDataValue* Data() const { return mData.get(); }
    uint32_t Frames() const { return mFrames; }
    uint32_t Channels() const { return mChannels; }
    uint32_t Rate() const { return mRate; }
    AudioDataValue* GetWritable() const { return mData.get(); }
  private:
    const uint32_t mFrames;
    const uint32_t mChannels;
    const uint32_t mRate;
    UniquePtr<AudioDataValue[]> mData;
  };

  while (!mCurrentData) {
    // No data in the queue. Return an empty chunk.
    if (AudioQueue().GetSize() == 0) {
      return MakeUnique<Chunk>();
    }

    AudioData* a = AudioQueue().PeekFront()->As<AudioData>();

    // Ignore the element with 0 frames and try next.
    if (a->mFrames == 0) {
      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
      continue;
    }

    // Ignore invalid samples.
    if (a->mRate != mInfo.mRate || a->mChannels != mInfo.mChannels) {
      NS_WARNING(nsPrintfCString(
        "mismatched sample format, data=%p rate=%u channels=%u frames=%u",
        a->mAudioData.get(), a->mRate, a->mChannels, a->mFrames).get());
      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
      continue;
    }

    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);
    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) +
                                static_cast<int64_t>(mWritten);
    CheckedInt64 missingFrames = sampleTime - playedFrames;

    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow in DecodedAudioDataSink");
      mErrored = true;
      return MakeUnique<Chunk>();
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      auto framesToPop = std::min<uint32_t>(missingFrames.value(), aFrames);
      mWritten += framesToPop;
      return MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels, mInfo.mRate);
    }

    mCurrentData = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
    mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
                                            mCurrentData->mChannels,
                                            mCurrentData->mFrames);
    MOZ_ASSERT(mCurrentData->mFrames > 0);
  }

  auto framesToPop = std::min(aFrames, mCursor->Available());

  SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
             mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);

  UniquePtr<AudioStream::Chunk> chunk =
    MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());

  mWritten += framesToPop;
  mCursor->Advance(framesToPop);

  // All frames are popped. Reset mCurrentData so we can pop new elements from
  // the audio queue in next calls to PopFrames().
  if (mCursor->Available() == 0) {
    mCurrentData = nullptr;
  }

  return chunk;
}
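
AudioBufferCursor does not appear on this page; below is a simplified stand-in showing the interface PopFrames() relies on, assuming interleaved samples. This is an illustrative sketch, not the tree's implementation.

// Hypothetical stand-in for AudioBufferCursor: a read cursor over an
// interleaved buffer of aFrames frames, each holding aChannels samples.
class AudioBufferCursorSketch {
public:
  AudioBufferCursorSketch(AudioDataValue* aPtr, uint32_t aChannels, uint32_t aFrames)
    : mPtr(aPtr), mChannels(aChannels), mFrames(aFrames) {}
  uint32_t Available() const { return mFrames; }
  AudioDataValue* Ptr() const { return mPtr; }
  void Advance(uint32_t aFrames) {
    MOZ_ASSERT(aFrames <= mFrames);
    mPtr += aFrames * mChannels;  // one frame = mChannels interleaved samples
    mFrames -= aFrames;
  }
private:
  AudioDataValue* mPtr;
  const uint32_t mChannels;
  uint32_t mFrames;
};
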