Example #1
0
bool
MediaSourceDemuxer::ScanSourceBuffersForContent()
{
  MOZ_ASSERT(OnTaskQueue());

  if (mSourceBuffers.IsEmpty()) {
    return false;
  }

  MonitorAutoLock mon(mMonitor);

  bool haveEmptySourceBuffer = false;
  for (const auto& sourceBuffer : mSourceBuffers) {
    MediaInfo info = sourceBuffer->GetMetadata();
    if (!info.HasAudio() && !info.HasVideo()) {
      haveEmptySourceBuffer = true;
    }
    if (info.HasAudio() && !mAudioTrack) {
      mInfo.mAudio = info.mAudio;
      mAudioTrack = sourceBuffer;
    }
    if (info.HasVideo() && !mVideoTrack) {
      mInfo.mVideo = info.mVideo;
      mVideoTrack = sourceBuffer;
    }
    if (info.IsEncrypted() && !mInfo.IsEncrypted()) {
      mInfo.mCrypto = info.mCrypto;
    }
  }
  if (mInfo.HasAudio() && mInfo.HasVideo()) {
    // We have both audio and video; any source buffers that are not ready yet can be ignored.
    return true;
  }
  return !haveEmptySourceBuffer;
}
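
For reference, a simplified standalone restatement of the same scan logic in plain C++ (hypothetical BufferMeta and ScanForContent names rather than the Gecko types; encryption handling omitted):

// Standalone sketch, not Gecko code: the decision made by
// ScanSourceBuffersForContent(), reduced to plain C++17.
#include <cstddef>
#include <optional>
#include <vector>

struct BufferMeta {
  bool hasAudio = false;
  bool hasVideo = false;
};

// Returns true when playback metadata is complete: either both an audio and a
// video track were found, or every source buffer reported at least one track.
bool ScanForContent(const std::vector<BufferMeta>& aBuffers,
                    std::optional<size_t>& aAudioIndex,
                    std::optional<size_t>& aVideoIndex) {
  if (aBuffers.empty()) {
    return false;
  }
  bool haveEmptyBuffer = false;
  for (size_t i = 0; i < aBuffers.size(); ++i) {
    const BufferMeta& meta = aBuffers[i];
    if (!meta.hasAudio && !meta.hasVideo) {
      haveEmptyBuffer = true;  // this buffer has not reported any track yet
    }
    if (meta.hasAudio && !aAudioIndex) {
      aAudioIndex = i;  // first buffer that provides audio wins
    }
    if (meta.hasVideo && !aVideoIndex) {
      aVideoIndex = i;  // first buffer that provides video wins
    }
  }
  if (aAudioIndex && aVideoIndex) {
    return true;  // both tracks found; buffers that are not ready can be ignored
  }
  return !haveEmptyBuffer;
}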
void MediaDecodeTask::OnMetadataRead(MetadataHolder&& aMetadata) {
  mMediaInfo = *aMetadata.mInfo;
  // Web Audio can only decode resources that contain an audio track.
  if (!mMediaInfo.HasAudio()) {
    mDecoderReader->Shutdown();
    ReportFailureOnMainThread(WebAudioDecodeJob::NoAudio);
    return;
  }

  // Build the codec label that will be reported to telemetry.
  nsCString codec;
  if (!mMediaInfo.mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
    codec = nsPrintfCString(
        "webaudio; %s", mMediaInfo.mAudio.GetAsAudioInfo()->mMimeType.get());
  } else {
    codec = nsPrintfCString("webaudio;resource; %s",
                            mContainerType.Type().AsString().Data());
  }

  // Report the codec to telemetry from a SystemGroup task.
  nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction(
      "MediaDecodeTask::OnMetadataRead", [codec]() -> void {
        MOZ_ASSERT(!codec.IsEmpty());
        MOZ_LOG(gMediaDecoderLog, LogLevel::Debug,
                ("Telemetry (WebAudio) MEDIA_CODEC_USED= '%s'", codec.get()));
        Telemetry::Accumulate(Telemetry::HistogramID::MEDIA_CODEC_USED, codec);
      });
  SystemGroup::Dispatch(TaskCategory::Other, task.forget());

  RequestSample();
}
Example #3
0
void
MediaDecodeTask::OnMetadataRead(MetadataHolder* aMetadata)
{
  mMediaInfo = aMetadata->mInfo;
  if (!mMediaInfo.HasAudio()) {
    mDecoderReader->Shutdown();
    ReportFailureOnMainThread(WebAudioDecodeJob::NoAudio);
    return;
  }
  RequestSample();
}
void
AudioSinkWrapper::Start(int64_t aStartTime, const MediaInfo& aInfo)
{
  AssertOwnerThread();
  MOZ_ASSERT(!mIsStarted, "playback already started.");

  mIsStarted = true;
  mPlayDuration = aStartTime;
  mPlayStartTime = TimeStamp::Now();

  // No audio is equivalent to the audio having ended before the video starts.
  mAudioEnded = !aInfo.HasAudio();

  if (aInfo.HasAudio()) {
    mAudioSink = mCreator->Create();
    mEndPromise = mAudioSink->Init(mParams);

    // Both resolution and rejection of the end promise route to OnAudioEnded.
    mAudioSinkPromise.Begin(mEndPromise->Then(
      mOwnerThread.get(), __func__, this,
      &AudioSinkWrapper::OnAudioEnded,
      &AudioSinkWrapper::OnAudioEnded));
  }
}
Example #5
0
void
MediaSourceReader::OnTrackBufferConfigured(TrackBuffer* aTrackBuffer, const MediaInfo& aInfo)
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  MOZ_ASSERT(aTrackBuffer->IsReady());
  MOZ_ASSERT(mTrackBuffers.Contains(aTrackBuffer));
  if (aInfo.HasAudio() && !mAudioTrack) {
    MSE_DEBUG("MediaSourceReader(%p)::OnTrackBufferConfigured %p audio", this, aTrackBuffer);
    mAudioTrack = aTrackBuffer;
  }
  if (aInfo.HasVideo() && !mVideoTrack) {
    MSE_DEBUG("MediaSourceReader(%p)::OnTrackBufferConfigured %p video", this, aTrackBuffer);
    mVideoTrack = aTrackBuffer;
  }
  // Let the decoder know its waiting-for-resources status may have changed.
  mDecoder->NotifyWaitingForResourcesStatusChanged();
}
Example #6
0
SeekTask::SeekTask(const void* aDecoderID,
                   AbstractThread* aThread,
                   MediaDecoderReaderWrapper* aReader,
                   SeekJob&& aSeekJob,
                   const MediaInfo& aInfo,
                   const media::TimeUnit& aDuration,
                   int64_t aCurrentMediaTime)
  : mDecoderID(aDecoderID)
  , mOwnerThread(aThread)
  , mReader(aReader)
  , mSeekJob(Move(aSeekJob))
  , mCurrentTimeBeforeSeek(aCurrentMediaTime)
  , mAudioRate(aInfo.mAudio.mRate)
  , mHasAudio(aInfo.HasAudio())
  , mHasVideo(aInfo.HasVideo())
  , mDropAudioUntilNextDiscontinuity(false)
  , mDropVideoUntilNextDiscontinuity(false)
  , mIsDiscarded(false)
  , mIsAudioQueueFinished(false)
  , mIsVideoQueueFinished(false)
  , mNeedToStopPrerollingAudio(false)
  , mNeedToStopPrerollingVideo(false)
{
  // Bound the seek time to be inside the media range.
  int64_t end = aDuration.ToMicroseconds();
  NS_ASSERTION(end != -1, "Should know end time by now");
  int64_t seekTime = mSeekJob.mTarget.GetTime().ToMicroseconds();
  seekTime = std::min(seekTime, end);
  seekTime = std::max(int64_t(0), seekTime);
  NS_ASSERTION(seekTime >= 0 && seekTime <= end,
               "Can only seek in range [0,duration]");
  mSeekJob.mTarget.SetTime(media::TimeUnit::FromMicroseconds(seekTime));

  mDropAudioUntilNextDiscontinuity = HasAudio();
  mDropVideoUntilNextDiscontinuity = HasVideo();

  // Configure MediaDecoderReaderWrapper.
  SetMediaDecoderReaderWrapperCallback();
}
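
For reference, the seek-target clamping above restated as a small standalone helper over microsecond values (hypothetical ClampSeekTimeUs name, plain C++ instead of Gecko's media::TimeUnit):

// Standalone sketch, not Gecko code: clamp a requested seek position into the
// media range [0, aEndUs], mirroring the SeekTask constructor.
#include <algorithm>
#include <cassert>
#include <cstdint>

int64_t ClampSeekTimeUs(int64_t aSeekUs, int64_t aEndUs) {
  assert(aEndUs != -1 && "Should know the media end time by now");
  int64_t clamped = std::min(aSeekUs, aEndUs);
  clamped = std::max(int64_t(0), clamped);
  assert(clamped >= 0 && clamped <= aEndUs && "Can only seek in [0, duration]");
  return clamped;
}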
Example #7
0
AccurateSeekTask::AccurateSeekTask(const void* aDecoderID,
                                   AbstractThread* aThread,
                                   MediaDecoderReaderWrapper* aReader,
                                   const SeekTarget& aTarget,
                                   const MediaInfo& aInfo,
                                   const media::TimeUnit& aEnd,
                                   int64_t aCurrentMediaTime)
  : SeekTask(aDecoderID, aThread, aReader, aTarget)
  , mCurrentTimeBeforeSeek(media::TimeUnit::FromMicroseconds(aCurrentMediaTime))
  , mAudioRate(aInfo.mAudio.mRate)
  , mDoneAudioSeeking(!aInfo.HasAudio() || aTarget.IsVideoOnly())
  , mDoneVideoSeeking(!aInfo.HasVideo())
{
  AssertOwnerThread();

  // Bound the seek time to be inside the media range.
  NS_ASSERTION(aEnd.ToMicroseconds() != -1, "Should know end time by now");
  mTarget.SetTime(std::max(media::TimeUnit(), std::min(mTarget.GetTime(), aEnd)));

  // Configure MediaDecoderReaderWrapper.
  SetCallbacks();
}