nsresult
MediaDataBox::Write()
{
  nsresult rv;
  BoxSizeChecker checker(mControl, size);
  Box::Write();
  nsTArray<uint32_t> types;
  types.AppendElement(Audio_Track);
  types.AppendElement(Video_Track);

  for (uint32_t l = 0; l < types.Length(); l++) {
    if (mTrackType & types[l]) {
      FragmentBuffer* frag = mControl->GetFragment(types[l]);
      nsTArray<nsRefPtr<EncodedFrame>> frames;

      // This is the last time this fragment's frames are fetched, so flush it.
      rv = frag->GetFirstFragment(frames, true);
      NS_ENSURE_SUCCESS(rv, rv);

      uint32_t len = frames.Length();
      for (uint32_t i = 0; i < len; i++) {
        mControl->Write((uint8_t*)frames.ElementAt(i)->GetFrameData().Elements(),
                        frames.ElementAt(i)->GetFrameData().Length());
      }
    }
  }
  return NS_OK;
}
uint32_t
TrackRunBox::fillSampleTable()
{
  uint32_t table_size = 0;
  nsresult rv;
  nsTArray<nsRefPtr<EncodedFrame>> frames;
  FragmentBuffer* frag = mControl->GetFragment(mTrackType);

  rv = frag->GetFirstFragment(frames);
  if (NS_FAILED(rv)) {
    return 0;
  }

  uint32_t len = frames.Length();
  sample_info_table = new tbl[len];
  for (uint32_t i = 0; i < len; i++) {
    sample_info_table[i].sample_duration = 0;
    sample_info_table[i].sample_size = frames.ElementAt(i)->GetFrameData().Length();
    mAllSampleSize += sample_info_table[i].sample_size;
    table_size += sizeof(uint32_t);
    if (flags.to_ulong() & flags_sample_flags_present) {
      sample_info_table[i].sample_flags =
        set_sample_flags(frames.ElementAt(i)->GetFrameType() == EncodedFrame::I_FRAME);
      table_size += sizeof(uint32_t);
    } else {
      sample_info_table[i].sample_flags = 0;
    }
    sample_info_table[i].sample_composition_time_offset = 0;
  }
  return table_size;
}
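The I_FRAME test above feeds set_sample_flags(), which marks everything that is not a key frame as a non-sync sample. A minimal sketch of what that helper could look like, assuming the 14496-12 sample-flags layout (sample_degradation_priority in bits 0-15, sample_is_non_sync_sample in bit 16); this is an illustrative reconstruction, not necessarily the exact body used here:

#include <bitset>
#include <stdint.h>

// Sketch: build the 32-bit sample_flags field per 14496-12. Only the
// sample_is_non_sync_sample bit (bit 16) is driven here; a sync sample
// (I-frame) clears it, every other frame sets it.
static uint32_t set_sample_flags(bool aSync)
{
  std::bitset<32> flags;
  flags.set(16, !aSync);
  return flags.to_ulong();
}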
nsresult
AVCConfigurationBox::Generate(uint32_t* aBoxSize)
{
  nsresult rv;
  FragmentBuffer* frag = mControl->GetFragment(Video_Track);
  rv = frag->GetCSD(avcConfig);
  NS_ENSURE_SUCCESS(rv, rv);
  size += avcConfig.Length();
  *aBoxSize = size;
  return NS_OK;
}
nsresult
EVRCSpecificBox::Generate(uint32_t* aBoxSize)
{
  nsresult rv;
  FragmentBuffer* frag = mControl->GetFragment(Audio_Track);
  rv = frag->GetCSD(evrcDecSpecInfo);
  NS_ENSURE_SUCCESS(rv, rv);
  size += evrcDecSpecInfo.Length();
  *aBoxSize = size;
  return NS_OK;
}
nsresult
ES_Descriptor::Generate(uint32_t* aBoxSize)
{
  nsresult rv;
  // 14496-1 '8.3.4 DecoderConfigDescriptor'
  // 14496-1 '10.2.3 SL Packet Header Configuration'
  Box::MetaHelper meta;
  meta.Init(mControl);
  FragmentBuffer* frag = mControl->GetFragment(Audio_Track);
  rv = frag->GetCSD(DecodeSpecificInfo);
  NS_ENSURE_SUCCESS(rv, rv);

  // ES_ID plus one byte packing streamDependenceFlag, URL_Flag,
  // OCRstreamFlag and streamPriority, per 14496-1's ES_Descriptor syntax.
  length = sizeof(ES_ID) + 1;
  length += DecodeSpecificInfo.Length();

  *aBoxSize = sizeof(tag) + sizeof(length) + length;
  return NS_OK;
}
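The length arithmetic above mirrors the ES_Descriptor wire layout from 14496-1: a one-byte tag, a one-byte length, a 16-bit ES_ID, one flags/priority byte, then the embedded decoder-specific info. A worked example, assuming a hypothetical 2-byte AAC AudioSpecificConfig as the CSD and single-byte tag/length members, as the sizeof arithmetic implies:

// Worked example (hypothetical 2-byte CSD):
//   length    = sizeof(ES_ID)               = 2   // 16-bit ES_ID
//             + 1                                 // streamDependenceFlag(1),
//                                                 // URL_Flag(1),
//                                                 // OCRstreamFlag(1),
//                                                 // streamPriority(5)
//             + DecodeSpecificInfo.Length()  = 2
//             = 5
//   *aBoxSize = sizeof(tag) + sizeof(length) + length = 1 + 1 + 5 = 7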
nsresult
MediaDataBox::Generate(uint32_t* aBoxSize)
{
  // Everything before the payload is the box header, so the first sample
  // starts at the current size.
  mFirstSampleOffset = size;
  mAllSampleSize = 0;

  if (mTrackType & Audio_Track) {
    FragmentBuffer* frag = mControl->GetFragment(Audio_Track);
    mAllSampleSize += frag->GetFirstFragmentSampleSize();
  }

  if (mTrackType & Video_Track) {
    FragmentBuffer* frag = mControl->GetFragment(Video_Track);
    mAllSampleSize += frag->GetFirstFragmentSampleSize();
  }

  size += mAllSampleSize;
  *aBoxSize = size;
  return NS_OK;
}
nsresult
TrackRunBox::Generate(uint32_t* aBoxSize)
{
  FragmentBuffer* frag = mControl->GetFragment(mTrackType);
  sample_count = frag->GetFirstFragmentSampleNumber();
  size += sizeof(sample_count);

  // data_offset needs to be updated if there is another TrackRunBox
  // before this one.
  if (flags.to_ulong() & flags_data_offset_present) {
    data_offset = 0;
    size += sizeof(data_offset);
  }
  size += fillSampleTable();

  *aBoxSize = size;
  return NS_OK;
}
void test_smoke()
{
    FragmentBuffer fb;
    NUT_TA(fb.readable_size() == 0);
    const void* bufs[2];
    size_t lens[2];
    NUT_TA(fb.readable_pointers(bufs, lens, 2) == 0);

    // First write: the buffer keeps the fragment, so nullptr comes back.
    FragmentBuffer::Fragment* f = FragmentBuffer::new_fragment(10);
    f->size = 2;
    f->buffer[1] = 0x12;
    f = fb.write_fragment(f);
    NUT_TA(nullptr == f);
    NUT_TA(fb.readable_size() == 2);
    NUT_TA(fb.readable_pointers(bufs, lens, 2) == 1);
    NUT_TA(lens[0] == 2);

    // Second write fits into the tail fragment's spare capacity: the bytes
    // are copied (still one readable segment) and the fragment is returned
    // for reuse.
    f = FragmentBuffer::new_fragment(10);
    f->size = 2;
    f->buffer[0] = 0x34;
    f->buffer[1] = 0x56;
    f = fb.write_fragment(f);
    NUT_TA(nullptr != f);
    NUT_TA(fb.readable_size() == 4);
    NUT_TA(fb.readable_pointers(bufs, lens, 2) == 1);
    NUT_TA(lens[0] == 4);

    // 7 bytes no longer fit in the tail, so the reused fragment is appended
    // as a second segment and the buffer takes ownership again.
    f->size = 7;
    f->buffer[0] = 0x78;
    f = fb.write_fragment(f);
    NUT_TA(nullptr == f);
    NUT_TA(fb.readable_size() == 11);
    NUT_TA(fb.readable_pointers(bufs, lens, 2) == 2);
    NUT_TA(lens[0] == 4 && lens[1] == 7);

    fb.skip_read(1);
    NUT_TA(fb.readable_pointers(bufs, lens, 2) == 2);
    NUT_TA(lens[0] == 3 && lens[1] == 7);

    // Reads are little-endian byte copies across segment boundaries.
    uint16_t v = 0;
    fb.read(&v, 2);
    NUT_TA(v == 0x3412);
    fb.read(&v, 2);
    NUT_TA(v == 0x7856);
    NUT_TA(fb.readable_size() == 6);
}
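The assertions above pin down write_fragment()'s ownership contract: it returns nullptr when the buffer keeps the fragment (first and third writes), and returns the fragment itself when its bytes fit into the tail fragment's spare capacity and were merely copied (second write). A minimal sketch written against that inferred contract, using only the calls exercised by the test; the helper name is hypothetical:

// Hypothetical helper: append one byte, reusing the fragment whenever
// write_fragment() hands it back instead of taking ownership of it.
static FragmentBuffer::Fragment* append_byte(FragmentBuffer& fb,
                                             FragmentBuffer::Fragment* f,
                                             uint8_t byte)
{
    if (nullptr == f) {
        f = FragmentBuffer::new_fragment(16);
    }
    f->size = 1;
    f->buffer[0] = byte;
    // nullptr: the buffer took the fragment. Non-null: the byte was copied
    // into spare tail capacity and the fragment can be refilled next call.
    return fb.write_fragment(f);
}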
nsresult
ISOMediaWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                                  uint32_t aFlags)
{
  // Muxing is complete; re-entry is not allowed.
  if (mState == MUXING_DONE) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  FragmentBuffer* frag = nullptr;
  uint32_t len = aData.GetEncodedFrames().Length();

  if (!len) {
    // No frames; nothing to write.
    return NS_OK;
  }

  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<EncodedFrame> frame(aData.GetEncodedFrames()[i]);
    EncodedFrame::FrameType type = frame->GetFrameType();
    if (type == EncodedFrame::AAC_AUDIO_FRAME ||
        type == EncodedFrame::AAC_CSD ||
        type == EncodedFrame::AMR_AUDIO_FRAME ||
        type == EncodedFrame::AMR_AUDIO_CSD) {
      frag = mAudioFragmentBuffer;
    } else if (type == EncodedFrame::AVC_I_FRAME ||
               type == EncodedFrame::AVC_P_FRAME ||
               type == EncodedFrame::AVC_B_FRAME ||
               type == EncodedFrame::AVC_CSD) {
      frag = mVideoFragmentBuffer;
    } else {
      MOZ_ASSERT(0);
      return NS_ERROR_FAILURE;
    }

    frag->AddFrame(frame);
  }

  // The encoder is expected to send the CSD (codec specific data) frame
  // before any audio/video frames. Once the CSD is available there is
  // enough information to generate the moov; until then the muxer must
  // wait before generating anything.
  if (mType & Audio_Track &&
      (!mAudioFragmentBuffer || !mAudioFragmentBuffer->HasCSD())) {
    return NS_OK;
  }
  if (mType & Video_Track &&
      (!mVideoFragmentBuffer || !mVideoFragmentBuffer->HasCSD())) {
    return NS_OK;
  }

  // All frames in an EncodedFrameContainer share one FrameType, so this
  // check doesn't need to be inside the for-loop.
  if (frag && (aFlags & END_OF_STREAM)) {
    frag->SetEndOfStream();
  }

  nsresult rv;
  bool EOS;
  if (ReadyToRunState(EOS)) {
    // TODO:
    // MediaEncoder doesn't use nsRunnable, so the thread gets stuck there
    // and a newly added nsRunnable won't get a chance to run before
    // MediaEncoder completes. Until MediaEncoder changes, RunState() has
    // to be called directly.
    // https://bugzilla.mozilla.org/show_bug.cgi?id=950429
    rv = RunState();
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}
nsresult
ISOMediaWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                                  uint32_t aFlags)
{
  PROFILER_LABEL("ISOMediaWriter", "WriteEncodedTrack",
                 js::ProfileEntry::Category::OTHER);

  // Muxing is complete; re-entry is not allowed.
  if (mState == MUXING_DONE) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  FragmentBuffer* frag = nullptr;
  uint32_t len = aData.GetEncodedFrames().Length();

  if (!len) {
    // No frames; nothing to write.
    return NS_OK;
  }

  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<EncodedFrame> frame(aData.GetEncodedFrames()[i]);
    EncodedFrame::FrameType type = frame->GetFrameType();
    if (type == EncodedFrame::AAC_AUDIO_FRAME ||
        type == EncodedFrame::AAC_CSD ||
        type == EncodedFrame::AMR_AUDIO_FRAME ||
        type == EncodedFrame::AMR_AUDIO_CSD) {
      frag = mAudioFragmentBuffer;
    } else if (type == EncodedFrame::AVC_I_FRAME ||
               type == EncodedFrame::AVC_P_FRAME ||
               type == EncodedFrame::AVC_B_FRAME ||
               type == EncodedFrame::AVC_CSD) {
      frag = mVideoFragmentBuffer;
    } else {
      MOZ_ASSERT(0);
      return NS_ERROR_FAILURE;
    }

    frag->AddFrame(frame);
  }

  // The encoder is expected to send the CSD (codec specific data) frame
  // before any audio/video frames. Once the CSD is available there is
  // enough information to generate the moov; until then the muxer must
  // wait before generating anything.
  if (mType & Audio_Track &&
      (!mAudioFragmentBuffer || !mAudioFragmentBuffer->HasCSD())) {
    return NS_OK;
  }
  if (mType & Video_Track &&
      (!mVideoFragmentBuffer || !mVideoFragmentBuffer->HasCSD())) {
    return NS_OK;
  }

  // All frames in an EncodedFrameContainer share one FrameType, so this
  // check doesn't need to be inside the for-loop.
  if (frag && (aFlags & END_OF_STREAM)) {
    frag->SetEndOfStream();
  }

  nsresult rv;
  bool EOS;
  if (ReadyToRunState(EOS)) {
    // The track encoder won't generate new data after EOS, so keep running
    // the state machine until it reaches MUXING_DONE once EOS is signaled.
    do {
      rv = RunState();
    } while (EOS && mState != MUXING_DONE);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}
uint32_t
TrackRunBox::fillSampleTable()
{
  uint32_t table_size = 0;
  nsresult rv;
  nsTArray<RefPtr<EncodedFrame>> frames;
  FragmentBuffer* frag = mControl->GetFragment(mTrackType);

  rv = frag->GetFirstFragment(frames);
  if (NS_FAILED(rv)) {
    return 0;
  }

  uint32_t len = frames.Length();
  sample_info_table = MakeUnique<tbl[]>(len);
  // Create the sample table according to 14496-12 8.8.8.2.
  for (uint32_t i = 0; i < len; i++) {
    // Sample size.
    sample_info_table[i].sample_size = 0;
    if (flags.to_ulong() & flags_sample_size_present) {
      sample_info_table[i].sample_size = frames.ElementAt(i)->GetFrameData().Length();
      mAllSampleSize += sample_info_table[i].sample_size;
      table_size += sizeof(uint32_t);
    }

    // Sample flags.
    sample_info_table[i].sample_flags = 0;
    if (flags.to_ulong() & flags_sample_flags_present) {
      sample_info_table[i].sample_flags =
        set_sample_flags(frames.ElementAt(i)->GetFrameType() == EncodedFrame::AVC_I_FRAME);
      table_size += sizeof(uint32_t);
    }

    // Sample duration.
    sample_info_table[i].sample_duration = 0;
    if (flags.to_ulong() & flags_sample_duration_present) {
      // Each frame's duration is "current frame timestamp - previous frame
      // timestamp"; the first frame of a fragment uses the last frame time
      // of the previous fragment.
      uint64_t frame_time = 0;
      if (i == 0) {
        frame_time = frames.ElementAt(i)->GetTimeStamp() -
                     frag->GetLastFragmentLastFrameTime();
      } else {
        frame_time = frames.ElementAt(i)->GetTimeStamp() -
                     frames.ElementAt(i - 1)->GetTimeStamp();
      }
      // Remember the last frame time of the current fragment; it is used to
      // calculate the first frame's duration in the next fragment. This must
      // also run for single-frame fragments, so it sits outside the branch
      // above.
      if ((len - 1) == i) {
        frag->SetLastFragmentLastFrameTime(frames.ElementAt(i)->GetTimeStamp());
      }
      // A TrackRunBox holds exactly one track type, either audio or video.
      MOZ_ASSERT(!(mTrackType & Video_Track) != !(mTrackType & Audio_Track));
      sample_info_table[i].sample_duration =
        (mTrackType & Video_Track ?
           frame_time * mVideoMeta->GetVideoClockRate() / USECS_PER_S :
           frame_time * mAudioMeta->GetAudioSampleRate() / USECS_PER_S);
      table_size += sizeof(uint32_t);
    }

    sample_info_table[i].sample_composition_time_offset = 0;
  }
  return table_size;
}
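The duration conversion at the end is plain unit arithmetic: frame_time is in microseconds, so multiplying by the track's timescale and dividing by USECS_PER_S (1,000,000) yields timescale ticks. A worked example with assumed values (a ~30 fps video track with a 90 kHz clock):

// Worked example (hypothetical values):
//   frame_time      = 33333 us        // current ts - previous ts at ~30 fps
//   clock rate      = 90000 ticks/s   // mVideoMeta->GetVideoClockRate()
//   sample_duration = 33333 * 90000 / 1000000
//                   = 2999 ticks      // integer division; ~1/30 s at 90 kHz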