void VideoSink::SetPlaying(bool aPlaying) { AssertOwnerThread(); VSINK_LOG_V(" playing (%d) -> (%d)", mAudioSink->IsPlaying(), aPlaying); if (!aPlaying) { // Reset any update timer if paused. mUpdateScheduler.Reset(); // Since playback is paused, tell compositor to render only current frame. RenderVideoFrames(1); if (mContainer) { mContainer->ClearCachedResources(); } } mAudioSink->SetPlaying(aPlaying); if (mHasVideo && aPlaying) { // There's no thread in VideoSink for pulling video frames, need to trigger // rendering while becoming playing status. because the VideoQueue may be // full already. TryUpdateRenderedVideoFrames(); } }
// Hands up to aMaxFrames frames from the head of the VideoQueue to the
// compositor, computing a wall-clock presentation timestamp for each from
// the media clock (aClockTime, in microseconds) and its wall-clock anchor
// (aClockTimeStamp), scaled by the playback rate.
void VideoSink::RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime,
                                  const TimeStamp& aClockTimeStamp) {
  AssertOwnerThread();

  // Peek (without removing) the first aMaxFrames frames; nothing to do if
  // the queue is empty or we have no compositor-side container.
  AutoTArray<RefPtr<MediaData>,16> frames;
  VideoQueue().GetFirstElements(aMaxFrames, &frames);
  if (frames.IsEmpty() || !mContainer) {
    return;
  }

  AutoTArray<ImageContainer::NonOwningImage,16> images;
  TimeStamp lastFrameTime;
  MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
  for (uint32_t i = 0; i < frames.Length(); ++i) {
    VideoData* frame = frames[i]->As<VideoData>();

    // Mark the frame as handed to the compositor *before* any of the skip
    // checks below: the frame-dropping loop in UpdateRenderedVideoFrames()
    // uses this flag to decide presented-vs-dropped stats, and a frame we
    // looked at here must not be reported as a sink-dropped frame.
    frame->mSentToCompositor = true;

    if (!frame->mImage || !frame->mImage->IsValid() ||
        !frame->mImage->GetSize().width || !frame->mImage->GetSize().height) {
      // No usable image payload; skip it.
      continue;
    }

    int64_t frameTime = frame->mTime;
    if (frameTime < 0) {
      // Frame times before the start time are invalid; drop such frames
      continue;
    }

    TimeStamp t;
    if (aMaxFrames > 1) {
      // Multi-frame hand-off: compute the wall-clock time this frame is due,
      // offsetting the clock anchor by the media-time delta divided by the
      // playback rate (faster playback -> frames due sooner).
      MOZ_ASSERT(!aClockTimeStamp.IsNull());
      int64_t delta = frame->mTime - aClockTime;
      t = aClockTimeStamp +
          TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
      if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
        // Timestamps out of order; drop the new frame. In theory we should
        // probably replace the previous frame with the new frame if the
        // timestamps are equal, but this is a corrupt video file already so
        // never mind.
        continue;
      }
      lastFrameTime = t;
    }
    // NOTE: with aMaxFrames == 1 (the paused path), t stays null — the
    // single frame is shown immediately.

    ImageContainer::NonOwningImage* img = images.AppendElement();
    img->mTimeStamp = t;
    img->mImage = frame->mImage;
    img->mFrameID = frame->mFrameID;
    img->mProducerID = mProducerID;

    VSINK_LOG_V("playing video frame %lld (id=%x) (vq-queued=%i)",
                frame->mTime, frame->mFrameID, VideoQueue().GetSize());
  }
  mContainer->SetCurrentFrames(frames[0]->As<VideoData>()->mDisplay, images);
}
// Advances the video position to the current media clock: drops frames whose
// end time has passed (updating frame statistics), pushes upcoming frames to
// the compositor, and schedules the next render-loop tick for when the next
// frame is due.
void VideoSink::UpdateRenderedVideoFrames() {
  AssertOwnerThread();
  MOZ_ASSERT(mAudioSink->IsPlaying(), "should be called while playing.");

  // Get the current playback position.
  TimeStamp nowTime;
  const int64_t clockTime = mAudioSink->GetPosition(&nowTime);
  NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");

  // Skip frames up to the playback position.
  // A frame is popped once the clock has passed its end time, but we always
  // keep at least mMinVideoQueueSize frames queued.
  int64_t lastDisplayedFrameEndTime = 0;
  while (VideoQueue().GetSize() > mMinVideoQueueSize &&
         clockTime >= VideoQueue().PeekFront()->GetEndTime()) {
    RefPtr<MediaData> frame = VideoQueue().PopFront();
    if (frame->As<VideoData>()->mSentToCompositor) {
      // The compositor saw this frame (RenderVideoFrames marked it);
      // count it as presented.
      lastDisplayedFrameEndTime = frame->GetEndTime();
      mFrameStats.NotifyPresentedFrame();
    } else {
      // Never handed to the compositor — record one dropped frame.
      mFrameStats.NotifyDecodedFrames({ 0, 0, 1 });
      VSINK_LOG_V("discarding video frame mTime=%lld clock_time=%lld",
                  frame->mTime, clockTime);
    }
  }

  // The presentation end time of the last video frame displayed is either
  // the end time of the current frame, or if we dropped all frames in the
  // queue, the end time of the last frame we removed from the queue.
  RefPtr<MediaData> currentFrame = VideoQueue().PeekFront();
  mVideoFrameEndTime = std::max(mVideoFrameEndTime,
      currentFrame ? currentFrame->GetEndTime() : lastDisplayedFrameEndTime);

  MaybeResolveEndPromise();

  // Push the next batch of frames to the compositor with their computed
  // presentation timestamps.
  RenderVideoFrames(mVideoQueueSendToCompositorSize, clockTime, nowTime);

  // Get the timestamp of the next frame. Schedule the next update at
  // the start time of the next frame. If we don't have a next frame,
  // we will run render loops again upon incoming frames.
  nsTArray<RefPtr<MediaData>> frames;
  VideoQueue().GetFirstElements(2, &frames);
  if (frames.Length() < 2) {
    return;
  }
  int64_t nextFrameTime = frames[1]->mTime;
  // Convert the media-time gap to wall-clock time by dividing by the
  // playback rate, then arm the timer (both the resolve and reject paths
  // re-enter the render loop).
  TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
      (nextFrameTime - clockTime) /
      mAudioSink->GetPlaybackParams().mPlaybackRate);
  RefPtr<VideoSink> self = this;
  mUpdateScheduler.Ensure(target,
      [self] () { self->UpdateRenderedVideoFramesByTimer(); },
      [self] () { self->UpdateRenderedVideoFramesByTimer(); });
}
// NOTE(review): this file contains multiple definitions of
// VideoSink::UpdateRenderedVideoFrames (see the earlier ones above) — these
// look like different revisions of the same function pasted together; only
// one can exist in a compiled translation unit. Confirm which revision is
// intended and remove the others.
//
// Drops frames the clock has passed, renders upcoming frames, and schedules
// the next tick after the remaining time until the next frame is due.
void VideoSink::UpdateRenderedVideoFrames() {
  AssertOwnerThread();
  MOZ_ASSERT(mAudioSink->IsPlaying(), "should be called while playing.");

  TimeStamp nowTime;
  const int64_t clockTime = mAudioSink->GetPosition(&nowTime);
  // Skip frames up to the frame at the playback position, and figure out
  // the time remaining until it's time to display the next frame and drop
  // the current frame.
  NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
  // Default wait when no future frame bounds it; presumably mDelayDuration
  // is a fallback polling interval in microseconds — TODO confirm.
  int64_t remainingTime = mDelayDuration;
  if (VideoQueue().GetSize() > 0) {
    // Hold the frame at the playback position out of the queue while we
    // advance past any frames the clock has already overtaken.
    RefPtr<MediaData> currentFrame = VideoQueue().PopFront();
    int32_t framesRemoved = 0;
    while (VideoQueue().GetSize() > 0) {
      MediaData* nextFrame = VideoQueue().PeekFront();
      if (!mRealTime && nextFrame->mTime > clockTime) {
        // Next frame is still in the future (non-real-time mode): stop here
        // and wait out the gap. NOTE(review): in real-time mode this branch
        // never breaks, so the loop consumes every queued frame — confirm
        // that is the intended real-time behavior.
        remainingTime = nextFrame->mTime - clockTime;
        break;
      }
      ++framesRemoved;
      if (!currentFrame->As<VideoData>()->mSentToCompositor) {
        // Frame was overtaken before ever reaching the compositor: dropped.
        mFrameStats.NotifyDecodedFrames(0, 0, 1);
        VSINK_LOG_V("discarding video frame mTime=%lld clock_time=%lld",
                    currentFrame->mTime, clockTime);
      }
      currentFrame = VideoQueue().PopFront();
    }
    // Put the frame at the playback position back at the head of the queue.
    VideoQueue().PushFront(currentFrame);
    if (framesRemoved > 0) {
      mVideoFrameEndTime = currentFrame->GetEndTime();
      mFrameStats.NotifyPresentedFrame();
    }
  }

  RenderVideoFrames(mVideoQueueSendToCompositorSize, clockTime, nowTime);

  // Arm the render-loop timer; both resolve and reject paths re-enter the
  // loop. NOTE(review): unlike the other revisions of this function,
  // remainingTime is not divided by the playback rate here — confirm.
  TimeStamp target = nowTime + TimeDuration::FromMicroseconds(remainingTime);
  RefPtr<VideoSink> self = this;
  mUpdateScheduler.Ensure(target,
      [self] () { self->UpdateRenderedVideoFramesByTimer(); },
      [self] () { self->UpdateRenderedVideoFramesByTimer(); });
}
// NOTE(review): another definition of VideoSink::UpdateRenderedVideoFrames —
// this file holds several revisions of the same function; only one can be
// compiled. Confirm which revision is current and drop the rest.
//
// Drops frames the clock has passed, resolves the end promise when playback
// of the queue is finished, renders upcoming frames, and schedules the next
// tick when the next frame is due.
void VideoSink::UpdateRenderedVideoFrames() {
  AssertOwnerThread();
  MOZ_ASSERT(mAudioSink->IsPlaying(), "should be called while playing.");

  TimeStamp nowTime;
  const int64_t clockTime = mAudioSink->GetPosition(&nowTime);
  // Skip frames up to the frame at the playback position, and figure out
  // the time remaining until it's time to display the next frame and drop
  // the current frame.
  NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
  // -1 is the sentinel for "no future frame queued"; see the early return
  // below which skips scheduling in that case.
  int64_t remainingTime = -1;
  if (VideoQueue().GetSize() > 0) {
    // Hold the frame at the playback position out of the queue while we
    // advance past any frames the clock has already overtaken.
    RefPtr<MediaData> currentFrame = VideoQueue().PopFront();
    int32_t framesRemoved = 0;
    while (VideoQueue().GetSize() > 0) {
      RefPtr<MediaData> nextFrame = VideoQueue().PeekFront();
      if (nextFrame->mTime > clockTime) {
        // Next frame is still in the future: stop and wait out the gap.
        remainingTime = nextFrame->mTime - clockTime;
        break;
      }
      ++framesRemoved;
      if (!currentFrame->As<VideoData>()->mSentToCompositor) {
        // Frame was overtaken before ever reaching the compositor: dropped.
        mFrameStats.NotifyDecodedFrames(0, 0, 1);
        VSINK_LOG_V("discarding video frame mTime=%lld clock_time=%lld",
                    currentFrame->mTime, clockTime);
      }
      currentFrame = VideoQueue().PopFront();
    }
    // Put the frame at the playback position back at the head of the queue.
    VideoQueue().PushFront(currentFrame);
    if (framesRemoved > 0) {
      mVideoFrameEndTime = currentFrame->GetEndTime();
      mFrameStats.NotifyPresentedFrame();
    }
  }

  // All frames are rendered, Let's resolve the promise.
  if (VideoQueue().IsFinished() &&
      VideoQueue().GetSize() <= 1 &&
      !mVideoSinkEndRequest.Exists()) {
    mEndPromiseHolder.ResolveIfExists(true, __func__);
  }

  RenderVideoFrames(mVideoQueueSendToCompositorSize, clockTime, nowTime);

  // No next fame to render. There is no need to schedule next render
  // loop. We will run render loops again upon incoming frames.
  if (remainingTime < 0) {
    return;
  }

  // Convert the media-time gap to wall-clock time by dividing by the
  // playback rate, then arm the timer (both resolve and reject paths
  // re-enter the render loop).
  TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
      remainingTime / mAudioSink->GetPlaybackParams().mPlaybackRate);
  RefPtr<VideoSink> self = this;
  mUpdateScheduler.Ensure(target,
      [self] () { self->UpdateRenderedVideoFramesByTimer(); },
      [self] () { self->UpdateRenderedVideoFramesByTimer(); });
}