// Hand up to aMaxFrames frames from the head of the video queue to the
// ImageContainer for compositing, stamping each with the wall-clock time at
// which it should appear. Frames without a valid image, with negative
// timestamps, or with out-of-order render times are skipped (but are still
// marked as sent to the compositor). Must run on the owner thread.
void VideoSink::RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime, const TimeStamp& aClockTimeStamp) {
  AssertOwnerThread();
  AutoTArray<RefPtr<MediaData>,16> frames;
  VideoQueue().GetFirstElements(aMaxFrames, &frames);
  // Nothing to render, or no container to render into.
  if (frames.IsEmpty() || !mContainer) {
    return;
  }
  AutoTArray<ImageContainer::NonOwningImage,16> images;
  TimeStamp lastFrameTime;
  MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
  for (uint32_t i = 0; i < frames.Length(); ++i) {
    VideoData* frame = frames[i]->As<VideoData>();
    // Flagged even for frames we end up skipping below, so the frame
    // accounting in UpdateRenderedVideoFrames() counts them as presented.
    frame->mSentToCompositor = true;
    if (!frame->mImage || !frame->mImage->IsValid()) {
      continue;
    }
    int64_t frameTime = frame->mTime;
    if (frameTime < 0) {
      // Frame times before the start time are invalid; drop such frames
      continue;
    }
    // When only a single frame is requested, t stays null and the
    // compositor shows the frame immediately.
    TimeStamp t;
    if (aMaxFrames > 1) {
      MOZ_ASSERT(!aClockTimeStamp.IsNull());
      // Map the frame's media time onto the wall clock, compensating for
      // the playback rate.
      int64_t delta = frame->mTime - aClockTime;
      t = aClockTimeStamp + TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
      if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
        // Timestamps out of order; drop the new frame. In theory we should
        // probably replace the previous frame with the new frame if the
        // timestamps are equal, but this is a corrupt video file already so
        // never mind.
        continue;
      }
      lastFrameTime = t;
    }
    ImageContainer::NonOwningImage* img = images.AppendElement();
    img->mTimeStamp = t;
    img->mImage = frame->mImage;
    img->mFrameID = frame->mFrameID;
    img->mProducerID = mProducerID;
    VSINK_LOG_V("playing video frame %lld (id=%x) (vq-queued=%i)", frame->mTime, frame->mFrameID, VideoQueue().GetSize());
  }
  // All images share the display size of the first queued frame.
  mContainer->SetCurrentFrames(frames[0]->As<VideoData>()->mDisplay, images);
}
// Tear down every decode/wait notification hookup for this seek task.
// DisconnectIfExists() is a no-op for callbacks that were never connected,
// so this is safe to call regardless of how far the seek progressed.
void AccurateSeekTask::CancelCallbacks() {
  AssertOwnerThread();
  // Audio-side callbacks.
  mAudioCallback.DisconnectIfExists();
  mAudioWaitCallback.DisconnectIfExists();
  // Video-side callbacks.
  mVideoCallback.DisconnectIfExists();
  mVideoWaitCallback.DisconnectIfExists();
}
// The reader's Seek() promise was rejected: mark the in-flight request
// complete, then propagate the failure to whoever is waiting on this task.
void AccurateSeekTask::OnSeekRejected(nsresult aResult) {
  AssertOwnerThread();
  mSeekRequest.Complete();
  // A cancellation disconnects mSeekRequest instead of rejecting it, so the
  // only way to get here is a genuine failure.
  MOZ_ASSERT(NS_FAILED(aResult), "Cancels should also disconnect mSeekRequest");
  RejectIfExist(__func__);
}
// Core render loop: advance the video queue past the audio clock, hand the
// next frames to the compositor, and schedule the next iteration for when
// the following frame is due. Must run on the owner thread while playing.
void VideoSink::UpdateRenderedVideoFrames() {
  AssertOwnerThread();
  MOZ_ASSERT(mAudioSink->IsPlaying(), "should be called while playing.");
  // Get the current playback position.
  TimeStamp nowTime;
  const int64_t clockTime = mAudioSink->GetPosition(&nowTime);
  NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
  // Skip frames up to the playback position.
  int64_t lastDisplayedFrameEndTime = 0;
  while (VideoQueue().GetSize() > mMinVideoQueueSize && clockTime >= VideoQueue().PeekFront()->GetEndTime()) {
    RefPtr<MediaData> frame = VideoQueue().PopFront();
    if (frame->As<VideoData>()->mSentToCompositor) {
      // The compositor saw this frame; count it as presented.
      lastDisplayedFrameEndTime = frame->GetEndTime();
      mFrameStats.NotifyPresentedFrame();
    } else {
      // Never reached the compositor: record it as a dropped frame.
      mFrameStats.NotifyDecodedFrames({ 0, 0, 1 });
      VSINK_LOG_V("discarding video frame mTime=%lld clock_time=%lld", frame->mTime, clockTime);
    }
  }
  // The presentation end time of the last video frame displayed is either
  // the end time of the current frame, or if we dropped all frames in the
  // queue, the end time of the last frame we removed from the queue.
  RefPtr<MediaData> currentFrame = VideoQueue().PeekFront();
  mVideoFrameEndTime = std::max(mVideoFrameEndTime, currentFrame ? currentFrame->GetEndTime() : lastDisplayedFrameEndTime);
  MaybeResolveEndPromise();
  RenderVideoFrames(mVideoQueueSendToCompositorSize, clockTime, nowTime);
  // Get the timestamp of the next frame. Schedule the next update at
  // the start time of the next frame. If we don't have a next frame,
  // we will run render loops again upon incoming frames.
  nsTArray<RefPtr<MediaData>> frames;
  VideoQueue().GetFirstElements(2, &frames);
  if (frames.Length() < 2) {
    return;
  }
  int64_t nextFrameTime = frames[1]->mTime;
  // Convert the media-time gap to wall-clock time, accounting for rate.
  TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
    (nextFrameTime - clockTime) / mAudioSink->GetPlaybackParams().mPlaybackRate);
  // Keep |this| alive in the scheduled lambdas via a strong reference.
  RefPtr<VideoSink> self = this;
  mUpdateScheduler.Ensure(target,
    [self] () { self->UpdateRenderedVideoFramesByTimer(); },
    [self] () { self->UpdateRenderedVideoFramesByTimer(); });
}
// Produce a one-line (plus optional mData detail) diagnostic snapshot of
// this DecodedStream for about:media style debugging output.
nsCString DecodedStream::GetDebugInfo() {
  AssertOwnerThread();
  // -1 signals "start time not set yet".
  return nsPrintfCString(
    "DecodedStream=%p mStartTime=%lld mLastOutputTime=%lld mPlaying=%d mData=%p",
    this, mStartTime.valueOr(-1), mLastOutputTime, mPlaying, mData.get())
    // Append the inner data's debug info on its own line when present.
    + (mData ? nsCString("\n") + mData->GetDebugInfo() : nsCString());
}
// Subscribe to the video queue's push/finish events so the sink is poked on
// the owner thread whenever new frames arrive or the queue drains.
void VideoSink::ConnectListener() {
  AssertOwnerThread();
  mPushListener = VideoQueue().PushEvent().Connect(
    mOwnerThread, this, &VideoSink::OnVideoQueuePushed);
  mFinishListener = VideoQueue().FinishEvent().Connect(
    mOwnerThread, this, &VideoSink::OnVideoQueueFinished);
}
// Ask the reader for the next audio sample needed to finish the seek.
void AccurateSeekTask::RequestAudioData() {
  AssertOwnerThread();
  // Sanity: no request may already be in flight or waiting, and the audio
  // side of the seek must not be finished yet.
  MOZ_ASSERT(!mReader->IsRequestingAudioData());
  MOZ_ASSERT(!mReader->IsWaitingAudioData());
  MOZ_ASSERT(!mDoneAudioSeeking);
  mReader->RequestAudioData();
}
// Record the requested playback rate and forward it to the live audio sink
// when one exists; the cached value is applied to sinks created later
// (see Start()/SetPlaybackParams()).
void AudioSinkWrapper::SetPlaybackRate(double aPlaybackRate) {
  AssertOwnerThread();
  // Fix: the member is mPlaybackRate — every other accessor in this file
  // (GetVideoPosition, SetPlaybackParams, the m-prefixed siblings mVolume /
  // mPreservesPitch) uses mParams.mPlaybackRate; "mParams.playbackRate"
  // named no member.
  mParams.mPlaybackRate = aPlaybackRate;
  if (mAudioSink) {
    mAudioSink->SetPlaybackRate(aPlaybackRate);
  }
}
// Cache the volume so a sink created later starts with it, and forward it
// to the live audio sink when one exists.
void AudioSinkWrapper::SetVolume(double aVolume) {
  AssertOwnerThread();
  mParams.mVolume = aVolume;
  if (!mAudioSink) {
    // No live sink right now; the cached value will be applied on creation.
    return;
  }
  mAudioSink->SetVolume(aVolume);
}
// Kick off a render pass, but only when one is actually warranted:
// no update already scheduled, at least one queued frame, and playback
// currently in progress.
void VideoSink::TryUpdateRenderedVideoFrames() {
  AssertOwnerThread();
  if (mUpdateScheduler.IsScheduled()) {
    // A render pass is already pending; it will pick up any new frames.
    return;
  }
  if (VideoQueue().GetSize() < 1) {
    return;
  }
  if (!mAudioSink->IsPlaying()) {
    return;
  }
  UpdateRenderedVideoFrames();
}
// Final teardown of the sink chain; forwards shutdown to the wrapped
// audio sink. Only legal once playback has fully stopped.
void VideoSink::Shutdown() {
  AssertOwnerThread();
  MOZ_ASSERT(!mAudioSink->IsStarted(), "must be called after playback stops.");
  VSINK_LOG("[%s]", __func__);
  mAudioSink->Shutdown();
}
// Cache the preserves-pitch flag for future sinks and push it to the live
// audio sink when one exists.
void AudioSinkWrapper::SetPreservesPitch(bool aPreservesPitch) {
  AssertOwnerThread();
  mParams.mPreservesPitch = aPreservesPitch;
  if (!mAudioSink) {
    // No live sink; the cached flag is applied when one is created.
    return;
  }
  mAudioSink->SetPreservesPitch(aPreservesPitch);
}
// Ask the reader for the next video sample needed to finish the seek.
void AccurateSeekTask::RequestVideoData() {
  AssertOwnerThread();
  // No request may already be in flight or waiting, and the video side of
  // the seek must not be finished yet.
  MOZ_ASSERT(!mDoneVideoSeeking);
  MOZ_ASSERT(!mReader->IsRequestingVideoData());
  MOZ_ASSERT(!mReader->IsWaitingVideoData());
  // NOTE(review): arguments are presumably (skip-to-next-keyframe=false,
  // time-threshold=zero) — confirm against the reader's RequestVideoData API.
  mReader->RequestVideoData(false, media::TimeUnit());
}
// Ask the reader for the next audio sample for this seek, logging how much
// audio is already buffered.
void SeekTask::RequestAudioData() {
  AssertOwnerThread();
  // Fix: the second specifier was "%o", which would have printed the
  // decoder-queued frame count in octal; use "%i" to match the first
  // specifier and log in decimal. (Width of SizeOfAudioQueueInFrames()'s
  // return type not visible here — confirm it matches %i.)
  SAMPLE_LOG("Queueing audio task - queued=%i, decoder-queued=%i",
             !!mSeekedAudioData, mReader->SizeOfAudioQueueInFrames());
  mReader->RequestAudioData();
}
// Return the promise resolved when the audio track finishes playing, or
// nullptr for any other track type. Only valid after playback has started.
RefPtr<GenericPromise> AudioSinkWrapper::OnEnded(TrackType aType) {
  AssertOwnerThread();
  MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
  if (aType != TrackInfo::kAudioTrack) {
    // This wrapper only tracks the audio track's end.
    return nullptr;
  }
  return mEndPromise;
}
// Estimate the playback position (microseconds) from wall-clock time when
// there is no audio clock to consult: accumulated play duration plus the
// rate-scaled time elapsed since playback (re)started.
int64_t AudioSinkWrapper::GetVideoPosition(TimeStamp aNow) const {
  AssertOwnerThread();
  MOZ_ASSERT(!mPlayStartTime.IsNull());
  // Wall-clock microseconds since we started playing.
  const int64_t elapsedUs = (aNow - mPlayStartTime).ToMicroseconds();
  // Take playback rate into account.
  return mPlayDuration + elapsedUs * mParams.mPlaybackRate;
}
// Report the end time of buffered audio, or -1 when the query is for a
// non-audio track or no audio sink currently exists.
int64_t AudioSinkWrapper::GetEndTime(TrackType aType) const {
  AssertOwnerThread();
  MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
  if (aType != TrackInfo::kAudioTrack || !mAudioSink) {
    return -1;
  }
  return mAudioSink->GetEndTime();
}
// Detach all queue event listeners; after this no push/finish notifications
// reach this DecodedStream.
void DecodedStream::DisconnectListener() {
  AssertOwnerThread();
  // Audio queue listeners.
  mAudioPushListener.Disconnect();
  mAudioFinishListener.Disconnect();
  // Video queue listeners.
  mVideoPushListener.Disconnect();
  mVideoFinishListener.Disconnect();
}
// Apply a full parameter set (volume, rate, pitch preservation) to the live
// audio sink if one exists, then cache it for sinks created later.
void AudioSinkWrapper::SetPlaybackParams(const PlaybackParams& aParams) {
  AssertOwnerThread();
  if (mAudioSink) {
    mAudioSink->SetVolume(aParams.mVolume);
    mAudioSink->SetPlaybackRate(aParams.mPlaybackRate);
    mAudioSink->SetPreservesPitch(aParams.mPreservesPitch);
  }
  mParams = aParams;
}
// Audio playback drained: switch the clock source over to wall-clock-based
// estimation (see GetVideoPosition).
void AudioSinkWrapper::OnAudioEnded() {
  AssertOwnerThread();
  mAudioSinkPromise.Complete();
  // Capture the position BEFORE resetting mPlayStartTime — GetPosition()
  // reads it; from here on elapsed time is measured from "now".
  mPlayDuration = GetPosition();
  if (!mPlayStartTime.IsNull()) {
    mPlayStartTime = TimeStamp::Now();
  }
  mAudioEnded = true;
}
// A decoded video sample arrived while seeking. Drop samples until the
// post-seek discontinuity, then either keep the first sample (fast seek) or
// discard up to the exact target (accurate seek), and check for completion.
void SeekTask::OnVideoDecoded(MediaData* aVideoSample) {
  AssertOwnerThread();
  RefPtr<MediaData> video(aVideoSample);
  MOZ_ASSERT(video);
  // The MDSM::mDecodedVideoEndTime will be updated once the whole SeekTask is
  // resolved.
  SAMPLE_LOG("OnVideoDecoded [%lld,%lld] disc=%d", (video ? video->mTime : -1), (video ? video->GetEndTime() : -1), (video ? video->mDiscontinuity : 0));
  if (!Exists()) {
    // We've received a sample from a previous decode. Discard it.
    return;
  }
  if (mDropVideoUntilNextDiscontinuity) {
    if (video->mDiscontinuity) {
      // This sample starts the post-seek stream; stop dropping.
      mDropVideoUntilNextDiscontinuity = false;
    }
  }
  if (!mDropVideoUntilNextDiscontinuity) {
    // We must be after the discontinuity; we're receiving samples
    // at or after the seek target.
    if (mSeekJob.mTarget.IsFast() && mSeekJob.mTarget.GetTime().ToMicroseconds() > mCurrentTimeBeforeSeek && video->mTime < mCurrentTimeBeforeSeek) {
      // We are doing a fastSeek, but we ended up *before* the previous
      // playback position. This is surprising UX, so switch to an accurate
      // seek and decode to the seek target. This is not conformant to the
      // spec, fastSeek should always be fast, but until we get the time to
      // change all Readers to seek to the keyframe after the currentTime
      // in this case, we'll just decode forward. Bug 1026330.
      mSeekJob.mTarget.SetType(SeekTarget::Accurate);
    }
    if (mSeekJob.mTarget.IsFast()) {
      // Non-precise seek. We can stop the seek at the first sample.
      mSeekedVideoData = video;
    } else {
      // We're doing an accurate seek. We still need to discard
      // MediaData up to the one containing exact seek target.
      if (NS_FAILED(DropVideoUpToSeekTarget(video.get()))) {
        // Dropping failed; fail the whole seek task.
        RejectIfExist(__func__);
        return;
      }
    }
  }
  CheckIfSeekComplete();
}
void VideoSink::OnVideoQueueFinished() { AssertOwnerThread(); // Run render loop if the end promise is not resolved yet. if (!mUpdateScheduler.IsScheduled() && mAudioSink->IsPlaying() && !mEndPromiseHolder.IsEmpty()) { UpdateRenderedVideoFrames(); } }
void SeekTask::RejectIfExist(const char* aCallSite) { AssertOwnerThread(); SeekTaskRejectValue val; val.mIsAudioQueueFinished = mIsAudioQueueFinished; val.mIsVideoQueueFinished = mIsVideoQueueFinished; mSeekTaskPromise.RejectIfExists(val, aCallSite); }
// Stop playback and tear down the audio sink. Order matters: flip the
// started flag, shut the sink down, then drop the references.
void AudioSinkWrapper::Stop() {
  AssertOwnerThread();
  MOZ_ASSERT(mIsStarted, "playback not started.");
  mIsStarted = false;
  mAudioSink->Shutdown();
  mAudioSink = nullptr;
  mEndPromise = nullptr;
}
// Resolve the end-of-playback promise once the queue is finished, at most
// one frame remains, and no sink-end request is still outstanding.
void VideoSink::MaybeResolveEndPromise() {
  AssertOwnerThread();
  // All frames are rendered, Let's resolve the promise.
  if (VideoQueue().IsFinished() && VideoQueue().GetSize() <= 1 && !mVideoSinkEndRequest.Exists()) {
    mEndPromiseHolder.ResolveIfExists(true, __func__);
  }
}
// Begin playback: create a fresh audio sink, initialize it (capturing its
// end promise), then apply the cached playback parameters to it.
void AudioSinkWrapper::Start(int64_t aStartTime, const MediaInfo& aInfo) {
  AssertOwnerThread();
  MOZ_ASSERT(!mIsStarted, "playback already started.");
  mIsStarted = true;
  mAudioSink = mCreator->Create();
  mEndPromise = mAudioSink->Init();
  // Push volume/rate/pitch settings accumulated while no sink existed.
  SetPlaybackParams(mParams);
}
// Describe the reader's video request state for logging/debug output:
// "pending" (request in flight), "waiting" (blocked on data), or "idle".
const char* SeekTask::VideoRequestStatus() {
  AssertOwnerThread();
  if (mReader->IsRequestingVideoData()) {
    // A request cannot be both in flight and waiting on data.
    MOZ_DIAGNOSTIC_ASSERT(!mReader->IsWaitingVideoData());
    return "pending";
  }
  if (mReader->IsWaitingVideoData()) {
    return "waiting";
  }
  return "idle";
}
// The demuxer-level seek succeeded; kick off sample decoding so the new
// stream time can be established.
void SeekTask::OnSeekResolved(media::TimeUnit) {
  AssertOwnerThread();
  mSeekRequest.Complete();
  // We must decode the first samples of active streams, so we can determine
  // the new stream time. So dispatch tasks to do that.
  EnsureVideoDecodeTaskQueued();
  // Audio decoding is skipped entirely for video-only seeks.
  if (!mSeekJob.mTarget.IsVideoOnly()) {
    EnsureAudioDecodeTaskQueued();
  }
}
// Current playback position: the stream start time plus the last output
// time. Optionally reports the wall-clock instant of the reading.
int64_t DecodedStream::GetPosition(TimeStamp* aTimeStamp) const {
  AssertOwnerThread();
  // This is only called after MDSM starts playback. So mStartTime is
  // guaranteed to be something.
  MOZ_ASSERT(mStartTime.isSome());
  if (aTimeStamp) {
    *aTimeStamp = TimeStamp::Now();
  }
  return mStartTime.ref() + mLastOutputTime;
}
// Start the seek: ask the reader to seek to the job's target, routing the
// resolve/reject callbacks back to the owner thread, and hand the caller a
// promise that settles when the whole seek task completes.
RefPtr<SeekTask::SeekTaskPromise> SeekTask::Seek(const media::TimeUnit& aDuration) {
  AssertOwnerThread();
  // Do the seek.
  mSeekRequest.Begin(mReader->Seek(mSeekJob.mTarget, aDuration)
    ->Then(OwnerThread(), __func__, this,
           &SeekTask::OnSeekResolved, &SeekTask::OnSeekRejected));
  return mSeekTaskPromise.Ensure(__func__);
}