nsresult
OggWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                             uint32_t aFlags)
{
  AUTO_PROFILER_LABEL("OggWriter::WriteEncodedTrack", OTHER);

  uint32_t len = aData.GetEncodedFrames().Length();
  for (uint32_t i = 0; i < len; i++) {
    if (aData.GetEncodedFrames()[i]->GetFrameType() !=
        EncodedFrame::OPUS_AUDIO_FRAME) {
      LOG("[OggWriter] wrong encoded data type!");
      return NS_ERROR_FAILURE;
    }

    // Only pass END_OF_STREAM on the last frame!
    nsresult rv =
      WriteEncodedData(aData.GetEncodedFrames()[i]->GetFrameData(),
                       aData.GetEncodedFrames()[i]->GetDuration(),
                       i < len - 1 ? (aFlags & ~ContainerWriter::END_OF_STREAM)
                                   : aFlags);
    if (NS_FAILED(rv)) {
      LOG("%p Failed to WriteEncodedTrack!", this);
      return rv;
    }
  }
  return NS_OK;
}
nsresult
WebMWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                              uint32_t aFlags)
{
  for (uint32_t i = 0; i < aData.GetEncodedFrames().Length(); i++) {
    mEbmlComposer->WriteSimpleBlock(aData.GetEncodedFrames().ElementAt(i).get());
  }
  return NS_OK;
}
nsresult
WebMWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                              uint32_t aFlags)
{
  PROFILER_LABEL("WebMWriter", "WriteEncodedTrack",
                 js::ProfileEntry::Category::OTHER);
  for (uint32_t i = 0; i < aData.GetEncodedFrames().Length(); i++) {
    mEbmlComposer->WriteSimpleBlock(aData.GetEncodedFrames().ElementAt(i).get());
  }
  return NS_OK;
}
TEST(OpusAudioTrackEncoder, FrameEncode)
{
  const int32_t channels = 1;
  const int32_t sampleRate = 44100;

  TestOpusTrackEncoder encoder;
  EXPECT_TRUE(encoder.TestOpusRawCreation(channels, sampleRate));

  // Generate five seconds of raw audio data.
  AudioGenerator generator(channels, sampleRate);
  AudioSegment segment;
  const int32_t samples = sampleRate * 5;
  generator.Generate(segment, samples);

  encoder.SetStartOffset(0);
  encoder.AppendAudioSegment(Move(segment));
  encoder.AdvanceCurrentTime(samples);

  EncodedFrameContainer container;
  EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

  // Verify that the encoded data is 5 seconds long.
  uint64_t totalDuration = 0;
  for (auto& frame : container.GetEncodedFrames()) {
    totalDuration += frame->GetDuration();
  }
  // The 44100Hz rate used above gets resampled to 48000Hz for Opus.
  const uint64_t five = 48000 * 5;
  EXPECT_EQ(five, totalDuration);
}
nsresult
OggWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                             uint32_t aFlags)
{
  for (uint32_t i = 0; i < aData.GetEncodedFrames().Length(); i++) {
    if (aData.GetEncodedFrames()[i]->GetFrameType() !=
        EncodedFrame::OPUS_AUDIO_FRAME) {
      LOG("[OggWriter] wrong encoded data type!");
      return NS_ERROR_FAILURE;
    }

    nsresult rv =
      WriteEncodedData(aData.GetEncodedFrames()[i]->GetFrameData(),
                       aData.GetEncodedFrames()[i]->GetDuration(),
                       aFlags);
    if (NS_FAILED(rv)) {
      LOG("%p Failed to WriteEncodedTrack!", this);
      return rv;
    }
  }
  return NS_OK;
}
nsresult
MediaEncoder::WriteEncodedDataToMuxer(TrackEncoder* aTrackEncoder)
{
  if (aTrackEncoder == nullptr) {
    return NS_OK;
  }
  if (aTrackEncoder->IsEncodingComplete()) {
    return NS_OK;
  }

  PROFILER_LABEL("MediaEncoder", "WriteEncodedDataToMuxer",
                 js::ProfileEntry::Category::OTHER);

  EncodedFrameContainer encodedVideoData;
  nsresult rv = aTrackEncoder->GetEncodedTrack(encodedVideoData);
  if (NS_FAILED(rv)) {
    // Encoding might be canceled.
    LOG(LogLevel::Error, ("Error! Fail to get encoded data from video encoder."));
    mState = ENCODE_ERROR;
    return rv;
  }

  // Update timestamps to accommodate pauses
  const nsTArray<RefPtr<EncodedFrame>>& encodedFrames =
    encodedVideoData.GetEncodedFrames();
  // Take a copy of the atomic so we don't continually access it
  uint64_t microsecondsSpentPaused = mMicrosecondsSpentPaused;
  for (size_t i = 0; i < encodedFrames.Length(); ++i) {
    RefPtr<EncodedFrame> frame = encodedFrames[i];
    if (frame->GetTimeStamp() > microsecondsSpentPaused &&
        frame->GetTimeStamp() - microsecondsSpentPaused > mLastMuxedTimestamp) {
      // Use the adjusted timestamp if it's after the last timestamp
      frame->SetTimeStamp(frame->GetTimeStamp() - microsecondsSpentPaused);
    } else {
      // If not, we force the last time stamp. We do this so the frames are
      // still around and in order in case the codec needs to reference them.
      // Dropping them here may result in artifacts in playback.
      frame->SetTimeStamp(mLastMuxedTimestamp);
    }
    MOZ_ASSERT(mLastMuxedTimestamp <= frame->GetTimeStamp(),
               "Our frames should be ordered by this point!");
    mLastMuxedTimestamp = frame->GetTimeStamp();
  }

  rv = mWriter->WriteEncodedTrack(encodedVideoData,
                                  aTrackEncoder->IsEncodingComplete()
                                    ? ContainerWriter::END_OF_STREAM
                                    : 0);
  if (NS_FAILED(rv)) {
    LOG(LogLevel::Error,
        ("Error! Fail to write encoded video track to the media container."));
    mState = ENCODE_ERROR;
  }
  return rv;
}
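The clamp-or-shift pause compensation above is easier to see in isolation. The following is a minimal standalone sketch of the same arithmetic; AdjustForPause is a hypothetical helper made up for illustration and does not exist in MediaEncoder.

// Illustrative sketch only; this mirrors the loop body above. AdjustForPause
// is hypothetical. Timestamps are in microseconds.
static uint64_t
AdjustForPause(uint64_t aTimeStamp,
               uint64_t aMicrosecondsSpentPaused,
               uint64_t& aLastMuxedTimestamp)
{
  uint64_t adjusted;
  if (aTimeStamp > aMicrosecondsSpentPaused &&
      aTimeStamp - aMicrosecondsSpentPaused > aLastMuxedTimestamp) {
    // Shift the timestamp back by the total time spent paused.
    adjusted = aTimeStamp - aMicrosecondsSpentPaused;
  } else {
    // Otherwise clamp to the last muxed timestamp so output stays ordered.
    adjusted = aLastMuxedTimestamp;
  }
  aLastMuxedTimestamp = adjusted;
  return adjusted;
}

// With 50000us spent paused and input timestamps {40000, 120000, 200000},
// the outputs are {0, 70000, 150000}: the first frame would land before the
// last muxed timestamp (initially 0), so it is clamped; the rest shift back
// by the time spent paused.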
// Test encoding a track that starts with null data.
TEST(VP8VideoTrackEncoder, NullFrameFirst)
{
  // Initiate VP8 encoder
  TestVP8TrackEncoder encoder;
  InitParam param = { true, 640, 480 };
  encoder.TestInit(param);

  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  RefPtr<Image> image = generator.GenerateI420Image();
  TimeStamp now = TimeStamp::Now();
  VideoSegment segment;

  // Pass two 100ms null frames to the encoder.
  for (uint32_t i = 0; i < 2; ++i) {
    segment.AppendFrame(nullptr,
                        mozilla::StreamTime(9000), // 100ms
                        generator.GetSize(),
                        PRINCIPAL_HANDLE_NONE,
                        false,
                        now + TimeDuration::FromSeconds(i * 0.1));
  }

  // Pass a real 100ms frame to the encoder.
  segment.AppendFrame(image.forget(),
                      mozilla::StreamTime(9000), // 100ms
                      generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE,
                      false,
                      now + TimeDuration::FromSeconds(0.3));

  encoder.SetCurrentFrames(segment);

  // End the track.
  segment.Clear();
  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0,
                                   TrackEventCommand::TRACK_EVENT_ENDED,
                                   segment);

  EncodedFrameContainer container;
  ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

  EXPECT_TRUE(encoder.IsEncodingComplete());

  // Verify that the total duration is 0.3s.
  uint64_t totalDuration = 0;
  for (auto& frame : container.GetEncodedFrames()) {
    totalDuration += frame->GetDuration();
  }
  const uint64_t pointThree = (PR_USEC_PER_SEC / 10) * 3;
  EXPECT_EQ(pointThree, totalDuration);
}
// Test encoding a track that has to skip frames.
TEST(VP8VideoTrackEncoder, SkippedFrames)
{
  // Initiate VP8 encoder
  TestVP8TrackEncoder encoder;
  InitParam param = { true, 640, 480 };
  encoder.TestInit(param);

  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  TimeStamp now = TimeStamp::Now();
  VideoSegment segment;

  // Pass 100 frames of the shortest possible duration where we don't get
  // rounding errors between the input and output rates.
  for (uint32_t i = 0; i < 100; ++i) {
    segment.AppendFrame(generator.GenerateI420Image(),
                        mozilla::StreamTime(90), // 1ms
                        generator.GetSize(),
                        PRINCIPAL_HANDLE_NONE,
                        false,
                        now + TimeDuration::FromMilliseconds(i));
  }

  encoder.SetCurrentFrames(segment);

  // End the track.
  segment.Clear();
  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0,
                                   TrackEventCommand::TRACK_EVENT_ENDED,
                                   segment);

  EncodedFrameContainer container;
  ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

  EXPECT_TRUE(encoder.IsEncodingComplete());

  // Verify that the total duration is 100 * 1ms = 100ms.
  uint64_t totalDuration = 0;
  for (auto& frame : container.GetEncodedFrames()) {
    totalDuration += frame->GetDuration();
  }
  const uint64_t hundredMillis = PR_USEC_PER_SEC / 10;
  EXPECT_EQ(hundredMillis, totalDuration);
}
// Test that encoding a single frame gives useful output.
TEST(VP8VideoTrackEncoder, SingleFrameEncode)
{
  // Initiate VP8 encoder
  TestVP8TrackEncoder encoder;
  InitParam param = { true, 640, 480 };
  encoder.TestInit(param);

  // Pass a half-second frame to the encoder.
  YUVBufferGenerator generator;
  generator.Init(mozilla::gfx::IntSize(640, 480));
  VideoSegment segment;
  segment.AppendFrame(generator.GenerateI420Image(),
                      mozilla::StreamTime(45000), // 1/2 second
                      generator.GetSize(),
                      PRINCIPAL_HANDLE_NONE);

  encoder.SetCurrentFrames(segment);

  // End the track.
  segment.Clear();
  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0,
                                   TrackEventCommand::TRACK_EVENT_ENDED,
                                   segment);

  EncodedFrameContainer container;
  ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

  EXPECT_TRUE(encoder.IsEncodingComplete());

  // Read out the encoded data and verify it.
  const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
  const size_t oneElement = 1;
  ASSERT_EQ(oneElement, frames.Length());

  EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType())
    << "We only have one frame, so it should be a keyframe";

  const uint64_t halfSecond = PR_USEC_PER_SEC / 2;
  EXPECT_EQ(halfSecond, frames[0]->GetDuration());
}
nsresult
ISOMediaWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                                  uint32_t aFlags)
{
  // Muxing is complete; reentry is not allowed.
  if (mState == MUXING_DONE) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  FragmentBuffer* frag = nullptr;
  uint32_t len = aData.GetEncodedFrames().Length();

  if (!len) {
    // No frames; nothing to write.
    return NS_OK;
  }

  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<EncodedFrame> frame(aData.GetEncodedFrames()[i]);
    EncodedFrame::FrameType type = frame->GetFrameType();
    if (type == EncodedFrame::AAC_AUDIO_FRAME ||
        type == EncodedFrame::AAC_CSD ||
        type == EncodedFrame::AMR_AUDIO_FRAME ||
        type == EncodedFrame::AMR_AUDIO_CSD) {
      frag = mAudioFragmentBuffer;
    } else if (type == EncodedFrame::AVC_I_FRAME ||
               type == EncodedFrame::AVC_P_FRAME ||
               type == EncodedFrame::AVC_B_FRAME ||
               type == EncodedFrame::AVC_CSD) {
      frag = mVideoFragmentBuffer;
    } else {
      MOZ_ASSERT(0);
      return NS_ERROR_FAILURE;
    }

    frag->AddFrame(frame);
  }

  // The encoder should send a CSD (codec specific data) frame before sending
  // the audio/video frames. Once the CSD data is ready, it is sufficient to
  // generate the moov data. If the encoder hasn't sent CSD yet, the muxer
  // needs to wait before generating anything.
  if (mType & Audio_Track &&
      (!mAudioFragmentBuffer || !mAudioFragmentBuffer->HasCSD())) {
    return NS_OK;
  }
  if (mType & Video_Track &&
      (!mVideoFragmentBuffer || !mVideoFragmentBuffer->HasCSD())) {
    return NS_OK;
  }

  // There is only one FrameType in an EncodedFrameContainer, so this doesn't
  // need to be inside the for-loop.
  if (frag && (aFlags & END_OF_STREAM)) {
    frag->SetEndOfStream();
  }

  nsresult rv;
  bool EOS;
  if (ReadyToRunState(EOS)) {
    // TODO:
    // MediaEncoder doesn't use nsRunnable, so the thread will be stuck on
    // that part and a newly added nsRunnable won't get to run before
    // MediaEncoder completes. Until MediaEncoder changes, RunState() needs
    // to be called directly.
    // https://bugzilla.mozilla.org/show_bug.cgi?id=950429
    rv = RunState();
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}
/**
 * Encoding flow in GetEncodedTrack():
 * 1: Check the mInitialized state and the packet duration.
 * 2: Move the data from mRawSegment to mSourceSegment.
 * 3: Encode the video chunks in mSourceSegment in a for-loop.
 * 3.1: Pick the video chunk by mRemainingTicks.
 * 3.2: Calculate the encoding duration for the parameter of
 *      vpx_codec_encode(). The encoding duration is a multiple of
 *      mEncodedFrameDuration.
 * 3.3: Set up the video chunk in mVPXImageWrapper by PrepareRawFrame().
 * 3.4: Send the frame into the vp8 encoder by vpx_codec_encode().
 * 3.5: Get the output frame from the encoder by calling
 *      GetEncodedPartitions().
 * 3.6: Calculate mRemainingTicks for the next target frame.
 * 3.7: Set nextEncodeOperation for the next target frame.
 *      There is a heuristic here: if the duration of the frames we have
 *      processed in mSourceSegment is 100ms, we can't spend more than 100ms
 *      encoding them.
 * 4. Remove the encoded chunks from mSourceSegment after the for-loop.
 *
 * Ex1: Input frame rate is 100 => input frame duration is 10ms each.
 *      mEncodedFrameRate is 30 => output frame duration is 33ms.
 *      In this case, the frame durations in mSourceSegment will be:
 *      1st : 0~10ms
 *      2nd : 10~20ms
 *      3rd : 20~30ms
 *      4th : 30~40ms
 *      ...
 *      The VP8 encoder will take the 1st and 4th frames to encode. At the
 *      beginning mRemainingTicks is 0 for the 1st frame; then mRemainingTicks
 *      is set to 23 (mEncodedFrameDuration - 1st frame duration) in order to
 *      pick the 4th frame.
 *
 * Ex2: Input frame rate is 25 => input frame duration is 40ms each.
 *      mEncodedFrameRate is 30 => output frame duration is 33ms.
 *      In this case, the frame durations in mSourceSegment will be:
 *      1st : 0~40ms
 *      2nd : 40~80ms
 *      3rd : 80~120ms
 *      4th : 120~160ms
 *      ...
 *      Because the input frame duration (40ms) is larger than 33ms, the first
 *      encoded frame's duration will be 66ms as computed by
 *      CalculateEncodedDuration, and mRemainingTicks will be set to 26
 *      (CalculateRemainingTicks: 0+66-40) in order to pick the next (2nd)
 *      frame in mSourceSegment.
 */
nsresult
VP8TrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  PROFILER_LABEL("VP8TrackEncoder", "GetEncodedTrack",
                 js::ProfileEntry::Category::OTHER);
  bool EOS;
  {
    // Move all the samples from mRawSegment to mSourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait if mEncoder is not initialized, or when there is not enough raw
    // data, but only if this is not the end of stream and we are not being
    // canceled.
    while (!mCanceled && (!mInitialized ||
           (mRawSegment.GetDuration() + mSourceSegment.GetDuration() <
            mEncodedFrameDuration && !mEndOfStream))) {
      mon.Wait();
    }
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
    mSourceSegment.AppendFrom(&mRawSegment);
    EOS = mEndOfStream;
  }

  VideoSegment::ChunkIterator iter(mSourceSegment);
  StreamTime durationCopied = 0;
  StreamTime totalProcessedDuration = 0;
  TimeStamp timebase = TimeStamp::Now();
  EncodeOperation nextEncodeOperation = ENCODE_NORMAL_FRAME;

  for (; !iter.IsEnded(); iter.Next()) {
    VideoChunk& chunk = *iter;
    // Accumulate the chunk's duration into durationCopied until it reaches
    // mRemainingTicks.
    durationCopied += chunk.GetDuration();
    MOZ_ASSERT(mRemainingTicks <= mEncodedFrameDuration);
    VP8LOG("durationCopied %lld mRemainingTicks %lld\n",
           durationCopied, mRemainingTicks);
    if (durationCopied >= mRemainingTicks) {
      VP8LOG("nextEncodeOperation is %d\n", nextEncodeOperation);
      // Calculate encodedDuration for this target frame.
      StreamTime encodedDuration = CalculateEncodedDuration(durationCopied);

      // Encode frame.
      if (nextEncodeOperation != SKIP_FRAME) {
        nsresult rv = PrepareRawFrame(chunk);
        NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);

        // Encode the data with the VP8 encoder.
        int flags = (nextEncodeOperation == ENCODE_NORMAL_FRAME)
                      ? 0 : VPX_EFLAG_FORCE_KF;
        if (vpx_codec_encode(mVPXContext, mVPXImageWrapper, mEncodedTimestamp,
                             (unsigned long)encodedDuration, flags,
                             VPX_DL_REALTIME)) {
          return NS_ERROR_FAILURE;
        }
        // Get the encoded data from the VP8 encoder.
        GetEncodedPartitions(aData);
      } else {
        // SKIP_FRAME
        // Extend the duration of the last encoded frame in aData,
        // because this frame will be skipped.
        nsRefPtr<EncodedFrame> last = nullptr;
        last = aData.GetEncodedFrames().LastElement();
        if (last) {
          last->SetDuration(last->GetDuration() + encodedDuration);
        }
      }

      // Move mEncodedTimestamp forward.
      mEncodedTimestamp += encodedDuration;
      totalProcessedDuration += durationCopied;
      // Calculate mRemainingTicks for the next target frame.
      mRemainingTicks = CalculateRemainingTicks(durationCopied,
                                                encodedDuration);

      // Check whether the remaining data is enough for the next target frame.
      if (mSourceSegment.GetDuration() - totalProcessedDuration >=
          mEncodedFrameDuration) {
        TimeDuration elapsedTime = TimeStamp::Now() - timebase;
        nextEncodeOperation = GetNextEncodeOperation(elapsedTime,
                                                     totalProcessedDuration);
        // Reset durationCopied for the next iteration.
        durationCopied = 0;
      } else {
        // Processing is done; there is not enough data left for the next
        // iteration, so break out of the for-loop.
        break;
      }
    }
  }

  // Remove the chunks we have processed.
  mSourceSegment.RemoveLeading(totalProcessedDuration);
  VP8LOG("RemoveLeading %lld\n", totalProcessedDuration);

  // End of stream; pull the remaining frames out of the encoder.
  if (EOS) {
    VP8LOG("mEndOfStream is true\n");
    mEncodingComplete = true;
    if (vpx_codec_encode(mVPXContext, nullptr, mEncodedTimestamp,
                         mEncodedFrameDuration, 0, VPX_DL_REALTIME)) {
      return NS_ERROR_FAILURE;
    }
    GetEncodedPartitions(aData);
  }

  return NS_OK;
}
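CalculateEncodedDuration and CalculateRemainingTicks are called above but not shown. The following is a minimal sketch consistent with the Ex1/Ex2 arithmetic in the doc comment; it is an illustration, not necessarily the tree's actual implementation, and the examples use the comment's ms values for readability while the real code operates on StreamTime ticks.

// Minimal sketch, consistent with Ex1/Ex2 above; not the tree's actual code.
StreamTime
VP8TrackEncoder::CalculateEncodedDuration(StreamTime aDurationCopied)
{
  // Cover whatever input duration the previously encoded frame didn't
  // already account for, rounded up to a whole number of output frames.
  StreamTime uncovered = aDurationCopied - mRemainingTicks;
  StreamTime frames =
    (uncovered + mEncodedFrameDuration - 1) / mEncodedFrameDuration;
  return frames * mEncodedFrameDuration;
}

StreamTime
VP8TrackEncoder::CalculateRemainingTicks(StreamTime aDurationCopied,
                                         StreamTime aEncodedDuration)
{
  // Whatever the output covers beyond the consumed input becomes the
  // threshold the next input must accumulate past before we encode again.
  return mRemainingTicks + aEncodedDuration - aDurationCopied;
}

// Ex2 check (ms units): mRemainingTicks=0, durationCopied=40,
// mEncodedFrameDuration=33 -> encodedDuration=66 and the next
// mRemainingTicks=0+66-40=26, matching the doc comment.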
/**
 * Encoding flow in GetEncodedTrack():
 * 1: Check the mInitialized state and the packet duration.
 * 2: Move the data from mRawSegment to mSourceSegment.
 * 3: Encode the video chunks in mSourceSegment in a for-loop.
 * 3.1: The duration is taken straight from the video chunk's duration.
 * 3.2: Set up the video chunk with mVPXImageWrapper by PrepareRawFrame().
 * 3.3: Pass the frame to the vp8 encoder by vpx_codec_encode().
 * 3.4: Get the encoded frame from the encoder by GetEncodedPartitions().
 * 3.5: Set nextEncodeOperation for the next target frame.
 *      There is a heuristic here: if the duration of the frames we have
 *      processed in mSourceSegment is 100ms, we can't spend more than 100ms
 *      encoding them.
 * 4. Remove the encoded chunks from mSourceSegment after the for-loop.
 */
nsresult
VP8TrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  PROFILER_LABEL("VP8TrackEncoder", "GetEncodedTrack",
                 js::ProfileEntry::Category::OTHER);
  bool EOS;
  {
    // Move all the samples from mRawSegment to mSourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait if mEncoder is not initialized, or when there is no raw data, but
    // only if this is not the end of stream and we are not being canceled.
    while (!mCanceled && (!mInitialized ||
           (mRawSegment.GetDuration() + mSourceSegment.GetDuration() == 0 &&
            !mEndOfStream))) {
      mon.Wait();
    }
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
    mSourceSegment.AppendFrom(&mRawSegment);
    EOS = mEndOfStream;
  }

  StreamTime totalProcessedDuration = 0;
  TimeStamp timebase = TimeStamp::Now();
  EncodeOperation nextEncodeOperation = ENCODE_NORMAL_FRAME;

  for (VideoSegment::ChunkIterator iter(mSourceSegment);
       !iter.IsEnded(); iter.Next()) {
    VideoChunk& chunk = *iter;
    VP8LOG("nextEncodeOperation is %d for frame of duration %lld\n",
           nextEncodeOperation, chunk.GetDuration());

    // Encode frame.
    if (nextEncodeOperation != SKIP_FRAME) {
      nsresult rv = PrepareRawFrame(chunk);
      NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);

      // Encode the data with the VP8 encoder.
      int flags = (nextEncodeOperation == ENCODE_NORMAL_FRAME)
                    ? 0 : VPX_EFLAG_FORCE_KF;
      if (vpx_codec_encode(mVPXContext, mVPXImageWrapper, mEncodedTimestamp,
                           (unsigned long)chunk.GetDuration(), flags,
                           VPX_DL_REALTIME)) {
        return NS_ERROR_FAILURE;
      }
      // Get the encoded data from the VP8 encoder.
      GetEncodedPartitions(aData);
    } else {
      // SKIP_FRAME
      // Extend the duration of the last encoded frame in aData,
      // because this frame will be skipped.
      NS_WARNING("MediaRecorder lagging behind. Skipping a frame.");
      RefPtr<EncodedFrame> last = aData.GetEncodedFrames().LastElement();
      if (last) {
        last->SetDuration(last->GetDuration() + chunk.GetDuration());
      }
    }

    // Move mEncodedTimestamp forward.
    mEncodedTimestamp += chunk.GetDuration();
    totalProcessedDuration += chunk.GetDuration();

    // Check what to do next.
    TimeDuration elapsedTime = TimeStamp::Now() - timebase;
    nextEncodeOperation = GetNextEncodeOperation(elapsedTime,
                                                 totalProcessedDuration);
  }

  // Remove the chunks we have processed.
  mSourceSegment.Clear();

  // End of stream; drain the remaining frames from the encoder.
  if (EOS) {
    VP8LOG("mEndOfStream is true\n");
    mEncodingComplete = true;
    // Bug 1243611: keep calling vpx_codec_encode and vpx_codec_get_cx_data
    // until vpx_codec_get_cx_data returns null.
    do {
      if (vpx_codec_encode(mVPXContext, nullptr, mEncodedTimestamp,
                           0, 0, VPX_DL_REALTIME)) {
        return NS_ERROR_FAILURE;
      }
    } while (GetEncodedPartitions(aData));
  }

  return NS_OK;
}
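GetNextEncodeOperation implements the 100ms heuristic mentioned in both doc comments, but its body isn't shown here. A plausible sketch follows; the thresholds and the ENCODE_I_FRAME name (implied by the VPX_EFLAG_FORCE_KF branch above) are assumptions, and FramesToUsecs is Gecko's usual frames-to-microseconds conversion returning a CheckedInt64.

// Illustrative sketch of the heuristic; ratios and names are assumed, not
// taken from the tree. Compares wall-clock encoding time against the media
// duration it covered.
VP8TrackEncoder::EncodeOperation
VP8TrackEncoder::GetNextEncodeOperation(TimeDuration aTimeElapsed,
                                        StreamTime aProcessedDuration)
{
  int64_t durationInUsec =
    FramesToUsecs(aProcessedDuration, mTrackRate).value();
  if (aTimeElapsed.ToMicroseconds() > durationInUsec) {
    // Encoding already took longer than the media it covered: drop the next
    // frame and fold its duration into the previous one (SKIP_FRAME above).
    return SKIP_FRAME;
  }
  if (aTimeElapsed.ToMicroseconds() > durationInUsec * 2 / 3) {
    // Close to the budget: force a keyframe, which skips inter-frame motion
    // estimation and is cheaper to encode.
    return ENCODE_I_FRAME;
  }
  return ENCODE_NORMAL_FRAME;
}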
nsresult
ISOMediaWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                                  uint32_t aFlags)
{
  PROFILER_LABEL("ISOMediaWriter", "WriteEncodedTrack",
                 js::ProfileEntry::Category::OTHER);
  // Muxing is complete; reentry is not allowed.
  if (mState == MUXING_DONE) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  FragmentBuffer* frag = nullptr;
  uint32_t len = aData.GetEncodedFrames().Length();

  if (!len) {
    // No frames; nothing to write.
    return NS_OK;
  }

  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<EncodedFrame> frame(aData.GetEncodedFrames()[i]);
    EncodedFrame::FrameType type = frame->GetFrameType();
    if (type == EncodedFrame::AAC_AUDIO_FRAME ||
        type == EncodedFrame::AAC_CSD ||
        type == EncodedFrame::AMR_AUDIO_FRAME ||
        type == EncodedFrame::AMR_AUDIO_CSD) {
      frag = mAudioFragmentBuffer;
    } else if (type == EncodedFrame::AVC_I_FRAME ||
               type == EncodedFrame::AVC_P_FRAME ||
               type == EncodedFrame::AVC_B_FRAME ||
               type == EncodedFrame::AVC_CSD) {
      frag = mVideoFragmentBuffer;
    } else {
      MOZ_ASSERT(0);
      return NS_ERROR_FAILURE;
    }

    frag->AddFrame(frame);
  }

  // The encoder should send a CSD (codec specific data) frame before sending
  // the audio/video frames. Once the CSD data is ready, it is sufficient to
  // generate the moov data. If the encoder hasn't sent CSD yet, the muxer
  // needs to wait before generating anything.
  if (mType & Audio_Track &&
      (!mAudioFragmentBuffer || !mAudioFragmentBuffer->HasCSD())) {
    return NS_OK;
  }
  if (mType & Video_Track &&
      (!mVideoFragmentBuffer || !mVideoFragmentBuffer->HasCSD())) {
    return NS_OK;
  }

  // There is only one FrameType in an EncodedFrameContainer, so this doesn't
  // need to be inside the for-loop.
  if (frag && (aFlags & END_OF_STREAM)) {
    frag->SetEndOfStream();
  }

  nsresult rv;
  bool EOS;
  if (ReadyToRunState(EOS)) {
    // Because the track encoder won't generate new data after EOS, we need to
    // make sure the state reaches MUXING_DONE once EOS is signaled.
    do {
      rv = RunState();
    } while (EOS && mState != MUXING_DONE);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}