/*----------------------------------------------------------------------
|   AP4_HintTrackReader::GetCurrentTimeStampMs
+---------------------------------------------------------------------*/
AP4_UI32
AP4_HintTrackReader::GetCurrentTimeStampMs()
{
    // express the current hint sample's CTS, which is in the hint
    // track's media timescale, in milliseconds
    AP4_UI64 cts = m_CurrentHintSample.GetCts();
    return (AP4_UI32)AP4_ConvertTime(cts, m_HintTrack.GetMediaTimeScale(), 1000);
}
/*----------------------------------------------------------------------
|   AP4_Track::GetSampleIndexForTimeStampMs
+---------------------------------------------------------------------*/
AP4_Result
AP4_Track::GetSampleIndexForTimeStampMs(AP4_TimeStamp ts, AP4_Ordinal& index)
{
    // express the timestamp (milliseconds) in the media timescale of this track
    AP4_TimeStamp media_ts = AP4_ConvertTime(ts, 1000, m_MediaTimeScale);

    // delegate the actual lookup to the sample table
    return m_SampleTable->GetSampleIndexForTimeStamp(media_ts, index);
}
/*---------------------------------------------------------------------- | AutoDetectAudioFragmentDuration +---------------------------------------------------------------------*/ static unsigned int AutoDetectAudioFragmentDuration(AP4_ByteStream& stream, TrackCursor* cursor) { // remember where we are in the stream AP4_Position where = 0; stream.Tell(where); AP4_LargeSize stream_size = 0; stream.GetSize(stream_size); AP4_LargeSize bytes_available = stream_size-where; AP4_UI64 fragment_count = 0; AP4_UI32 last_fragment_size = 0; AP4_Atom* atom = NULL; while (AP4_SUCCEEDED(AP4_DefaultAtomFactory::Instance.CreateAtomFromStream(stream, bytes_available, atom))) { if (atom && atom->GetType() == AP4_ATOM_TYPE_MOOF) { AP4_ContainerAtom* moof = AP4_DYNAMIC_CAST(AP4_ContainerAtom, atom); AP4_TfhdAtom* tfhd = AP4_DYNAMIC_CAST(AP4_TfhdAtom, moof->FindChild("traf/tfhd")); if (tfhd && tfhd->GetTrackId() == cursor->m_Track->GetId()) { ++fragment_count; AP4_TrunAtom* trun = AP4_DYNAMIC_CAST(AP4_TrunAtom, moof->FindChild("traf/trun")); if (trun) { last_fragment_size = trun->GetEntries().ItemCount(); } } } delete atom; atom = NULL; } // restore the stream to its original position stream.Seek(where); // decide if we can infer an fragment size if (fragment_count == 0 || cursor->m_Samples->GetSampleCount() == 0) { return 0; } // don't count the last fragment if we have more than one if (fragment_count > 1 && last_fragment_size) { --fragment_count; } if (fragment_count <= 1 || cursor->m_Samples->GetSampleCount() < last_fragment_size) { last_fragment_size = 0; } AP4_Sample sample; AP4_UI64 total_duration = 0; for (unsigned int i=0; i<cursor->m_Samples->GetSampleCount()-last_fragment_size; i++) { cursor->m_Samples->GetSample(i, sample); total_duration += sample.GetDuration(); } return (unsigned int)AP4_ConvertTime(total_duration/fragment_count, cursor->m_Track->GetMediaTimeScale(), 1000); }
/*----------------------------------------------------------------------
|   AP4_Track::SetMovieTimeScale
+---------------------------------------------------------------------*/
AP4_Result
AP4_Track::SetMovieTimeScale(AP4_UI32 time_scale)
{
    // converting from a zero timescale is not possible
    if (m_MovieTimeScale == 0) return AP4_FAILURE;

    // rescale the track duration from the old movie timescale to the new one
    AP4_UI64 rescaled_duration = AP4_ConvertTime(m_TrakAtom->GetDuration(),
                                                 m_MovieTimeScale,
                                                 time_scale);
    m_TrakAtom->SetDuration(rescaled_duration);

    // remember the new movie timescale
    m_MovieTimeScale = time_scale;

    return AP4_SUCCESS;
}
/*----------------------------------------------------------------------
|   Mp4Writer::WriteFile
|
|   Finalize the collected samples (sort each GOP by display order and
|   assign CTS values), create the AVC video track and write the MP4
|   file to m_outputFilename.
|
|   frameRate: video frame rate in frames/second (must be > 0)
|   returns 0 on success, -1 on error (m_errorMsg describes the error)
+---------------------------------------------------------------------*/
int8_t Mp4Writer::WriteFile(int frameRate)
{
    // frameRate is used as the basis for the media timescale, so a
    // zero or negative value would later cause a division by zero
    if (frameRate <= 0) {
        m_errorMsg = "ERROR: invalid frame rate";
        return -1;
    }

    // adjust the sample CTS/DTS offsets based on the sample orders:
    // each GOP is delimited by a sample with display order 0, and is
    // sorted by display order
    if (m_sample_orders.ItemCount() > 1) {
        unsigned int start = 0;
        for (unsigned int i=1; i<=m_sample_orders.ItemCount(); i++) {
            if (i == m_sample_orders.ItemCount() || m_sample_orders[i].m_DisplayOrder == 0) {
                // we got to the end of the GOP, sort it by display order
                mp_sample_storage->SortSamples(&m_sample_orders[start], i-start);
                start = i;
            }
        }
    }

    // compute the largest decode-order/display-order offset so that
    // all CTS values can be kept >= their DTS
    unsigned int max_delta = 0;
    for (unsigned int i=0; i<m_sample_orders.ItemCount(); i++) {
        if (m_sample_orders[i].m_DecodeOrder > i) {
            unsigned int delta = m_sample_orders[i].m_DecodeOrder-i;
            if (delta > max_delta) {
                max_delta = delta;
            }
        }
    }
    for (unsigned int i=0; i<m_sample_orders.ItemCount(); i++) {
        mp_sample_table->UseSample(m_sample_orders[i].m_DecodeOrder).SetCts(1000ULL*(AP4_UI64)(i+max_delta));
    }

    // check the video parameters: use the first sequence parameter set found
    AP4_AvcSequenceParameterSet* sps = NULL;
    for (unsigned int i=0; i<=AP4_AVC_SPS_MAX_ID; i++) {
        if (m_parser.GetSequenceParameterSets()[i]) {
            sps = m_parser.GetSequenceParameterSets()[i];
            break;
        }
    }
    if (sps == NULL) {
        m_errorMsg = "ERROR: no sequence parameter set found in video";
        return -1;
    }
    unsigned int video_width  = 0;
    unsigned int video_height = 0;
    sps->GetInfo(video_width, video_height);

    // collect the SPS and PPS into arrays
    AP4_Array<AP4_DataBuffer> sps_array;
    for (unsigned int i=0; i<=AP4_AVC_SPS_MAX_ID; i++) {
        if (m_parser.GetSequenceParameterSets()[i]) {
            sps_array.Append(m_parser.GetSequenceParameterSets()[i]->raw_bytes);
        }
    }
    AP4_Array<AP4_DataBuffer> pps_array;
    for (unsigned int i=0; i<=AP4_AVC_PPS_MAX_ID; i++) {
        if (m_parser.GetPictureParameterSets()[i]) {
            pps_array.Append(m_parser.GetPictureParameterSets()[i]->raw_bytes);
        }
    }

    // setup the video sample description
    AP4_AvcSampleDescription* sample_description =
        new AP4_AvcSampleDescription(AP4_SAMPLE_FORMAT_AVC1,
                                     video_width,
                                     video_height,
                                     24,       // depth
                                     "h264",   // compressor name
                                     sps->profile_idc,
                                     sps->level_idc,
                                     sps->constraint_set0_flag<<7 |
                                     sps->constraint_set1_flag<<6 |
                                     sps->constraint_set2_flag<<5 |
                                     sps->constraint_set3_flag<<4,
                                     4,        // NALU length size
                                     sps_array,
                                     pps_array);
    mp_sample_table->AddSampleDescription(sample_description);

    // media timescale is frameRate*1000 so that each frame lasts 1000 units
    m_frameRate = frameRate * 1000;
    AP4_UI32 movie_timescale      = 1000;
    AP4_UI32 media_timescale      = m_frameRate;
    AP4_UI64 video_track_duration = AP4_ConvertTime(1000*mp_sample_table->GetSampleCount(), media_timescale, movie_timescale);
    AP4_UI64 video_media_duration = 1000*mp_sample_table->GetSampleCount();

    // create a video track
    AP4_Track* track = new AP4_Track(AP4_Track::TYPE_VIDEO,
                                     mp_sample_table,
                                     0,                    // auto-select track id
                                     movie_timescale,      // movie time scale
                                     video_track_duration, // track duration
                                     m_frameRate,          // media time scale
                                     video_media_duration, // media duration
                                     "und",                // language
                                     video_width<<16,      // width  (16.16 fixed point)
                                     video_height<<16      // height (16.16 fixed point)
                                     );

    // update the brands list
    m_brands.Append(AP4_FILE_BRAND_AVC1);
    mp_movie->AddTrack(track);

    // open the output
    AP4_Result result;
    AP4_ByteStream* output = NULL;
    result = AP4_FileByteStream::Create(m_outputFilename.c_str(), AP4_FileByteStream::STREAM_MODE_WRITE, output);
    if (AP4_FAILED(result)) {
        m_errorMsg = "ERROR: cannot open output file";
        return -1;
    }

    // create a multimedia file
    AP4_File file(mp_movie);

    // set the file type
    file.SetFileType(AP4_FILE_BRAND_MP42, 1, &m_brands[0], m_brands.ItemCount());

    // write the file to the output, checking the result
    // (the original code ignored write failures and then fell off the
    // end of this non-void function, which is undefined behavior)
    result = AP4_FileWriter::Write(file, *output);

    // cleanup
    output->Release();

    if (AP4_FAILED(result)) {
        m_errorMsg = "ERROR: cannot write output file";
        return -1;
    }
    return 0;
}
/*---------------------------------------------------------------------- | Fragment +---------------------------------------------------------------------*/ static void Fragment(AP4_File& input_file, AP4_ByteStream& output_stream, AP4_Array<TrackCursor*>& cursors, unsigned int fragment_duration, AP4_UI32 timescale, AP4_UI32 track_id, bool create_segment_index) { AP4_List<FragmentInfo> fragments; TrackCursor* index_cursor = NULL; AP4_Result result; AP4_Movie* input_movie = input_file.GetMovie(); if (input_movie == NULL) { fprintf(stderr, "ERROR: no moov found in the input file\n"); return; } // create the output file object AP4_Movie* output_movie = new AP4_Movie(1000); // create an mvex container AP4_ContainerAtom* mvex = new AP4_ContainerAtom(AP4_ATOM_TYPE_MVEX); AP4_MehdAtom* mehd = new AP4_MehdAtom(0); mvex->AddChild(mehd); // add an output track for each track in the input file for (unsigned int i=0; i<cursors.ItemCount(); i++) { AP4_Track* track = cursors[i]->m_Track; // skip non matching tracks if we have a selector if (track_id && track->GetId() != track_id) { continue; } result = cursors[i]->Init(); if (AP4_FAILED(result)) { fprintf(stderr, "ERROR: failed to init sample cursor (%d), skipping track %d\n", result, track->GetId()); return; } // create a sample table (with no samples) to hold the sample description AP4_SyntheticSampleTable* sample_table = new AP4_SyntheticSampleTable(); for (unsigned int j=0; j<track->GetSampleDescriptionCount(); j++) { AP4_SampleDescription* sample_description = track->GetSampleDescription(j); sample_table->AddSampleDescription(sample_description, false); } // create the track AP4_Track* output_track = new AP4_Track(sample_table, track->GetId(), timescale?timescale:1000, AP4_ConvertTime(track->GetDuration(), input_movie->GetTimeScale(), timescale?timescale:1000), timescale?timescale:track->GetMediaTimeScale(), 0,//track->GetMediaDuration(), track); output_movie->AddTrack(output_track); // add a trex entry to the mvex container 
AP4_TrexAtom* trex = new AP4_TrexAtom(track->GetId(), 1, 0, 0, 0); mvex->AddChild(trex); } // select the anchor cursor TrackCursor* anchor_cursor = NULL; for (unsigned int i=0; i<cursors.ItemCount(); i++) { if (cursors[i]->m_Track->GetId() == track_id) { anchor_cursor = cursors[i]; } } if (anchor_cursor == NULL) { for (unsigned int i=0; i<cursors.ItemCount(); i++) { // use this as the anchor track if it is the first video track if (cursors[i]->m_Track->GetType() == AP4_Track::TYPE_VIDEO) { anchor_cursor = cursors[i]; break; } } } if (anchor_cursor == NULL) { // no video track to anchor with, pick the first audio track for (unsigned int i=0; i<cursors.ItemCount(); i++) { if (cursors[i]->m_Track->GetType() == AP4_Track::TYPE_AUDIO) { anchor_cursor = cursors[i]; break; } } // no audio track to anchor with, pick the first subtitles track for (unsigned int i=0; i<cursors.ItemCount(); i++) { if (cursors[i]->m_Track->GetType() == AP4_Track::TYPE_SUBTITLES) { anchor_cursor = cursors[i]; break; } } } if (anchor_cursor == NULL) { // this shoudl never happen fprintf(stderr, "ERROR: no anchor track\n"); return; } if (create_segment_index) { index_cursor = anchor_cursor; } if (Options.debug) { printf("Using track ID %d as anchor\n", anchor_cursor->m_Track->GetId()); } // update the mehd duration mehd->SetDuration(output_movie->GetDuration()); // add the mvex container to the moov container output_movie->GetMoovAtom()->AddChild(mvex); // compute all the fragments unsigned int sequence_number = 1; for(;;) { TrackCursor* cursor = NULL; // pick the first track with a fragment index lower than the anchor's for (unsigned int i=0; i<cursors.ItemCount(); i++) { if (track_id && cursors[i]->m_Track->GetId() != track_id) continue; if (cursors[i]->m_Eos) continue; if (cursors[i]->m_FragmentIndex < anchor_cursor->m_FragmentIndex) { cursor = cursors[i]; break; } } // check if we found a non-anchor cursor to use if (cursor == NULL) { // the anchor should be used in this round, check if we can 
use it if (anchor_cursor->m_Eos) { // the anchor is done, pick a new anchor unless we need to trim anchor_cursor = NULL; if (!Options.trim) { for (unsigned int i=0; i<cursors.ItemCount(); i++) { if (track_id && cursors[i]->m_Track->GetId() != track_id) continue; if (cursors[i]->m_Eos) continue; if (anchor_cursor == NULL || cursors[i]->m_Track->GetType() == AP4_Track::TYPE_VIDEO || cursors[i]->m_Track->GetType() == AP4_Track::TYPE_AUDIO) { anchor_cursor = cursors[i]; if (Options.debug) { printf("+++ New anchor: Track ID %d\n", anchor_cursor->m_Track->GetId()); } } } } } cursor = anchor_cursor; } if (cursor == NULL) break; // all done // decide how many samples go into this fragment AP4_UI64 target_dts; if (cursor == anchor_cursor) { // compute the current dts in milliseconds AP4_UI64 anchor_dts_ms = AP4_ConvertTime(cursor->m_Sample.GetDts(), cursor->m_Track->GetMediaTimeScale(), 1000); // round to the nearest multiple of fragment_duration AP4_UI64 anchor_position = (anchor_dts_ms + (fragment_duration/2))/fragment_duration; // pick the next fragment_duration multiple at our target target_dts = AP4_ConvertTime(fragment_duration*(anchor_position+1), 1000, cursor->m_Track->GetMediaTimeScale()); } else { target_dts = AP4_ConvertTime(anchor_cursor->m_Sample.GetDts(), anchor_cursor->m_Track->GetMediaTimeScale(), cursor->m_Track->GetMediaTimeScale()); if (target_dts <= cursor->m_Sample.GetDts()) { // we must be at the end, past the last anchor sample, just use the target duration target_dts = AP4_ConvertTime(fragment_duration*(cursor->m_FragmentIndex+1), 1000, cursor->m_Track->GetMediaTimeScale()); if (target_dts <= cursor->m_Sample.GetDts()) { // we're still behind, there may have been an alignment/rounding error, just advance by one segment duration target_dts = cursor->m_Sample.GetDts()+AP4_ConvertTime(fragment_duration, 1000, cursor->m_Track->GetMediaTimeScale()); } } } unsigned int end_sample_index = cursor->m_Samples->GetSampleCount(); AP4_UI64 smallest_diff = 
(AP4_UI64)(0xFFFFFFFFFFFFFFFFULL); AP4_Sample sample; for (unsigned int i=cursor->m_SampleIndex+1; i<=cursor->m_Samples->GetSampleCount(); i++) { AP4_UI64 dts; if (i < cursor->m_Samples->GetSampleCount()) { result = cursor->m_Samples->GetSample(i, sample); if (AP4_FAILED(result)) { fprintf(stderr, "ERROR: failed to get sample %d (%d)\n", i, result); return; } if (!sample.IsSync()) continue; // only look for sync samples dts = sample.GetDts(); } else { result = cursor->m_Samples->GetSample(i-1, sample); if (AP4_FAILED(result)) { fprintf(stderr, "ERROR: failed to get sample %d (%d)\n", i-1, result); return; } dts = sample.GetDts()+sample.GetDuration(); } AP4_SI64 diff = dts-target_dts; AP4_UI64 abs_diff = diff<0?-diff:diff; if (abs_diff < smallest_diff) { // this sample is the closest to the target so far end_sample_index = i; smallest_diff = abs_diff; } if (diff >= 0) { // this sample is past the target, it is not going to get any better, stop looking break; } } if (cursor->m_Eos) continue; if (Options.debug) { if (cursor == anchor_cursor) { printf("===="); } else { printf("----"); } printf(" Track ID %d - dts=%lld, target=%lld, start=%d, end=%d/%d\n", cursor->m_Track->GetId(), cursor->m_Sample.GetDts(), target_dts, cursor->m_SampleIndex, end_sample_index, cursor->m_Track->GetSampleCount()); } // emit a fragment for the selected track if (Options.verbosity > 1) { printf("fragment: track ID %d ", cursor->m_Track->GetId()); } // decide which sample description index to use // (this is not very sophisticated, we only look at the sample description // index of the first sample in the group, which may not be correct. 
This // should be fixed later) unsigned int sample_desc_index = cursor->m_Sample.GetDescriptionIndex(); unsigned int tfhd_flags = AP4_TFHD_FLAG_DEFAULT_BASE_IS_MOOF; if (sample_desc_index > 0) { tfhd_flags |= AP4_TFHD_FLAG_SAMPLE_DESCRIPTION_INDEX_PRESENT; } if (cursor->m_Track->GetType() == AP4_Track::TYPE_VIDEO) { tfhd_flags |= AP4_TFHD_FLAG_DEFAULT_SAMPLE_FLAGS_PRESENT; } // setup the moof structure AP4_ContainerAtom* moof = new AP4_ContainerAtom(AP4_ATOM_TYPE_MOOF); AP4_MfhdAtom* mfhd = new AP4_MfhdAtom(sequence_number++); moof->AddChild(mfhd); AP4_ContainerAtom* traf = new AP4_ContainerAtom(AP4_ATOM_TYPE_TRAF); AP4_TfhdAtom* tfhd = new AP4_TfhdAtom(tfhd_flags, cursor->m_Track->GetId(), 0, sample_desc_index+1, 0, 0, 0); if (tfhd_flags & AP4_TFHD_FLAG_DEFAULT_SAMPLE_FLAGS_PRESENT) { tfhd->SetDefaultSampleFlags(0x1010000); // sample_is_non_sync_sample=1, sample_depends_on=1 (not I frame) } traf->AddChild(tfhd); if (!Options.no_tfdt) { AP4_TfdtAtom* tfdt = new AP4_TfdtAtom(1, cursor->m_Timestamp); traf->AddChild(tfdt); } AP4_UI32 trun_flags = AP4_TRUN_FLAG_DATA_OFFSET_PRESENT | AP4_TRUN_FLAG_SAMPLE_DURATION_PRESENT | AP4_TRUN_FLAG_SAMPLE_SIZE_PRESENT; AP4_UI32 first_sample_flags = 0; if (cursor->m_Track->GetType() == AP4_Track::TYPE_VIDEO) { trun_flags |= AP4_TRUN_FLAG_FIRST_SAMPLE_FLAGS_PRESENT; first_sample_flags = 0x2000000; // sample_depends_on=2 (I frame) } AP4_TrunAtom* trun = new AP4_TrunAtom(trun_flags, 0, first_sample_flags); traf->AddChild(trun); moof->AddChild(traf); // create a new FragmentInfo object to store the fragment details FragmentInfo* fragment = new FragmentInfo(cursor->m_Samples, cursor->m_Tfra, cursor->m_Timestamp, moof); fragments.Add(fragment); // add samples to the fragment unsigned int sample_count = 0; AP4_Array<AP4_TrunAtom::Entry> trun_entries; fragment->m_MdatSize = AP4_ATOM_HEADER_SIZE; for (;;) { // if we have one non-zero CTS delta, we'll need to express it if (cursor->m_Sample.GetCtsDelta()) { trun->SetFlags(trun->GetFlags() | 
AP4_TRUN_FLAG_SAMPLE_COMPOSITION_TIME_OFFSET_PRESENT); } // add one sample trun_entries.SetItemCount(sample_count+1); AP4_TrunAtom::Entry& trun_entry = trun_entries[sample_count]; trun_entry.sample_duration = timescale? (AP4_UI32)AP4_ConvertTime(cursor->m_Sample.GetDuration(), cursor->m_Track->GetMediaTimeScale(), timescale): cursor->m_Sample.GetDuration(); trun_entry.sample_size = cursor->m_Sample.GetSize(); trun_entry.sample_composition_time_offset = timescale? (AP4_UI32)AP4_ConvertTime(cursor->m_Sample.GetCtsDelta(), cursor->m_Track->GetMediaTimeScale(), timescale): cursor->m_Sample.GetCtsDelta(); fragment->m_SampleIndexes.SetItemCount(sample_count+1); fragment->m_SampleIndexes[sample_count] = cursor->m_SampleIndex; fragment->m_MdatSize += trun_entry.sample_size; fragment->m_Duration += trun_entry.sample_duration; // next sample cursor->m_Timestamp += trun_entry.sample_duration; result = cursor->SetSampleIndex(cursor->m_SampleIndex+1); if (AP4_FAILED(result)) { fprintf(stderr, "ERROR: failed to get sample %d (%d)\n", cursor->m_SampleIndex+1, result); return; } sample_count++; if (cursor->m_Eos) { if (Options.debug) { printf("[Track ID %d has reached the end]\n", cursor->m_Track->GetId()); } break; } if (cursor->m_SampleIndex >= end_sample_index) { break; // done with this fragment } } if (Options.verbosity > 1) { printf(" %d samples\n", sample_count); } // update moof and children trun->SetEntries(trun_entries); trun->SetDataOffset((AP4_UI32)moof->GetSize()+AP4_ATOM_HEADER_SIZE); // advance the cursor's fragment index ++cursor->m_FragmentIndex; } // write the ftyp atom AP4_FtypAtom* ftyp = input_file.GetFileType(); if (ftyp) { // keep the existing brand and compatible brands AP4_Array<AP4_UI32> compatible_brands; compatible_brands.EnsureCapacity(ftyp->GetCompatibleBrands().ItemCount()+1); for (unsigned int i=0; i<ftyp->GetCompatibleBrands().ItemCount(); i++) { compatible_brands.Append(ftyp->GetCompatibleBrands()[i]); } // add the compatible brand if it is not 
already there if (!ftyp->HasCompatibleBrand(AP4_FILE_BRAND_ISO5)) { compatible_brands.Append(AP4_FILE_BRAND_ISO5); } // create a replacement AP4_FtypAtom* new_ftyp = new AP4_FtypAtom(ftyp->GetMajorBrand(), ftyp->GetMinorVersion(), &compatible_brands[0], compatible_brands.ItemCount()); ftyp = new_ftyp; } else { AP4_UI32 compat = AP4_FILE_BRAND_ISO5; ftyp = new AP4_FtypAtom(AP4_FTYP_BRAND_MP42, 0, &compat, 1); } ftyp->Write(output_stream); delete ftyp; // write the moov atom output_movie->GetMoovAtom()->Write(output_stream); // write the (not-yet fully computed) index if needed AP4_SidxAtom* sidx = NULL; AP4_Position sidx_position = 0; output_stream.Tell(sidx_position); if (create_segment_index) { sidx = new AP4_SidxAtom(index_cursor->m_Track->GetId(), index_cursor->m_Track->GetMediaTimeScale(), 0, 0); // reserve space for the entries now, but they will be computed and updated later sidx->SetReferenceCount(fragments.ItemCount()); sidx->Write(output_stream); } // write all fragments for (AP4_List<FragmentInfo>::Item* item = fragments.FirstItem(); item; item = item->GetNext()) { FragmentInfo* fragment = item->GetData(); // remember the time and position of this fragment output_stream.Tell(fragment->m_MoofPosition); fragment->m_Tfra->AddEntry(fragment->m_Timestamp, fragment->m_MoofPosition); // write the moof fragment->m_Moof->Write(output_stream); // write mdat output_stream.WriteUI32(fragment->m_MdatSize); output_stream.WriteUI32(AP4_ATOM_TYPE_MDAT); AP4_DataBuffer sample_data; AP4_Sample sample; for (unsigned int i=0; i<fragment->m_SampleIndexes.ItemCount(); i++) { // get the sample result = fragment->m_Samples->GetSample(fragment->m_SampleIndexes[i], sample); if (AP4_FAILED(result)) { fprintf(stderr, "ERROR: failed to get sample %d (%d)\n", fragment->m_SampleIndexes[i], result); return; } // read the sample data result = sample.ReadData(sample_data); if (AP4_FAILED(result)) { fprintf(stderr, "ERROR: failed to read sample data for sample %d (%d)\n", 
fragment->m_SampleIndexes[i], result); return; } // write the sample data result = output_stream.Write(sample_data.GetData(), sample_data.GetDataSize()); if (AP4_FAILED(result)) { fprintf(stderr, "ERROR: failed to write sample data (%d)\n", result); return; } } } // update the index and re-write it if needed if (create_segment_index) { unsigned int segment_index = 0; AP4_SidxAtom::Reference reference; for (AP4_List<FragmentInfo>::Item* item = fragments.FirstItem(); item; item = item->GetNext()) { FragmentInfo* fragment = item->GetData(); reference.m_ReferencedSize = (AP4_UI32)(fragment->m_Moof->GetSize()+fragment->m_MdatSize); reference.m_SubsegmentDuration = fragment->m_Duration; reference.m_StartsWithSap = true; sidx->SetReference(segment_index++, reference); } AP4_Position here = 0; output_stream.Tell(here); output_stream.Seek(sidx_position); sidx->Write(output_stream); output_stream.Seek(here); delete sidx; } // create an mfra container and write out the index AP4_ContainerAtom mfra(AP4_ATOM_TYPE_MFRA); for (unsigned int i=0; i<cursors.ItemCount(); i++) { if (track_id && cursors[i]->m_Track->GetId() != track_id) { continue; } mfra.AddChild(cursors[i]->m_Tfra); cursors[i]->m_Tfra = NULL; } AP4_MfroAtom* mfro = new AP4_MfroAtom((AP4_UI32)mfra.GetSize()+16); mfra.AddChild(mfro); result = mfra.Write(output_stream); if (AP4_FAILED(result)) { fprintf(stderr, "ERROR: failed to write 'mfra' (%d)\n", result); return; } // cleanup fragments.DeleteReferences(); for (unsigned int i=0; i<cursors.ItemCount(); i++) { delete cursors[i]; } for (AP4_List<FragmentInfo>::Item* item = fragments.FirstItem(); item; item = item->GetNext()) { FragmentInfo* fragment = item->GetData(); delete fragment->m_Moof; } delete output_movie; }
/*----------------------------------------------------------------------
|   AP4_Track::AP4_Track
|
|   Construct a track around an existing sample table. The trak atom
|   created here is owned by this object; the sample table is not.
+---------------------------------------------------------------------*/
AP4_Track::AP4_Track(Type             type,
                     AP4_SampleTable* sample_table,
                     AP4_UI32         track_id,
                     AP4_UI32         movie_time_scale,
                     AP4_UI32         media_time_scale,
                     AP4_UI64         media_duration,
                     const char*      language,
                     AP4_UI32         width,
                     AP4_UI32         height) :
    m_TrakAtomIsOwned(true),
    m_Type(type),
    m_SampleTable(sample_table),
    m_SampleTableIsOwned(false),
    m_MovieTimeScale(movie_time_scale ? movie_time_scale : AP4_TRACK_DEFAULT_MOVIE_TIMESCALE),
    m_MediaTimeScale(media_time_scale)
{
    // audio tracks get a fixed-point 1.0 volume, all others are silent
    unsigned int track_volume = (type == TYPE_AUDIO) ? 0x100 : 0;

    // map the track type to a handler type and a handler name
    AP4_Atom::Type handler_type = 0;
    const char*    handler_name = NULL;
    if (type == TYPE_AUDIO) {
        handler_type = AP4_HANDLER_TYPE_SOUN;
        handler_name = "Bento4 Sound Handler";
    } else if (type == TYPE_VIDEO) {
        handler_type = AP4_HANDLER_TYPE_VIDE;
        handler_name = "Bento4 Video Handler";
    } else if (type == TYPE_HINT) {
        handler_type = AP4_HANDLER_TYPE_HINT;
        handler_name = "Bento4 Hint Handler";
    }

    // express the media duration in units of the movie time scale
    AP4_UI64 track_duration = AP4_ConvertTime(media_duration,
                                              media_time_scale,
                                              movie_time_scale);

    // create the trak atom owned by this track
    m_TrakAtom = DNew AP4_TrakAtom(sample_table,
                                   handler_type,
                                   handler_name,
                                   track_id,
                                   0,
                                   0,
                                   track_duration,
                                   media_time_scale,
                                   media_duration,
                                   track_volume,
                                   language,
                                   width,
                                   height);
}
/*----------------------------------------------------------------------
|   AP4_LinearReader::SeekTo
|
|   Seek to (approximately) time_ms, using the 'mfra'/'tfra' random
|   access index of a fragmented source. The index is lazily located by
|   reading the trailing 'mfro' atom of the fragment stream. On success,
|   m_NextFragmentPosition points at the selected 'moof' and all tracker
|   state is reset. If actual_time_ms is not NULL, it receives the time
|   actually seeked to (defaults to time_ms).
|   Returns AP4_ERROR_NOT_SUPPORTED for non-fragmented sources or when
|   no usable index exists, AP4_FAILURE if no entry precedes time_ms.
+---------------------------------------------------------------------*/
AP4_Result
AP4_LinearReader::SeekTo(AP4_UI32 time_ms, AP4_UI32* actual_time_ms)
{
    if (actual_time_ms) *actual_time_ms = time_ms; // default

    // we only support fragmented sources for now
    if (!m_HasFragments) return AP4_ERROR_NOT_SUPPORTED;

    // look for a fragment index (lazily load the 'mfra' from the end of the stream)
    if (m_Mfra == NULL) {
        if (m_FragmentStream) {
            // get the size of the stream (needed)
            AP4_LargeSize stream_size = 0;
            m_FragmentStream->GetSize(stream_size);

            if (stream_size > 12) {
                // remember where we are
                AP4_Position here;
                m_FragmentStream->Tell(here);

                // read the last 12 bytes: if the file ends with an 'mfro' atom,
                // its last field is the size of the whole 'mfra' container
                unsigned char mfro[12];
                AP4_Result result = m_FragmentStream->Seek(stream_size-12);
                if (AP4_SUCCEEDED(result)) {
                    result = m_FragmentStream->Read(mfro, 12);
                }
                if (AP4_SUCCEEDED(result) && mfro[0] == 'm' && mfro[1] == 'f' && mfro[2] == 'r' && mfro[3] == 'o') {
                    // bytes 8..11 of the mfro payload hold the mfra size (big-endian)
                    AP4_UI32 mfra_size = AP4_BytesToUInt32BE(&mfro[8]);
                    if ((AP4_LargeSize)mfra_size < stream_size) {
                        result = m_FragmentStream->Seek(stream_size-mfra_size);
                        if (AP4_SUCCEEDED(result)) {
                            AP4_Atom* mfra = NULL;
                            AP4_LargeSize available = mfra_size;
                            // parse failure leaves m_Mfra NULL, handled below
                            AP4_DefaultAtomFactory::Instance.CreateAtomFromStream(*m_FragmentStream, available, mfra);
                            m_Mfra = AP4_DYNAMIC_CAST(AP4_ContainerAtom, mfra);
                        }
                    }
                }
                // restore the stream position we saved above
                if (AP4_SUCCEEDED(result)) {
                    result = m_FragmentStream->Seek(here);
                }
            }
        }
    }

    // return now if we have not found an index
    if (m_Mfra == NULL) {
        return AP4_ERROR_NOT_SUPPORTED;
    }

    // look for the earliest fragment referenced by an entry with the largest timestamp that's
    // before or equal to the requested time
    int best_entry = -1;
    for (unsigned t=0; t<m_Trackers.ItemCount(); t++) {
        // find the tfra index for this track
        AP4_TfraAtom* tfra = NULL;
        for (AP4_List<AP4_Atom>::Item* item = m_Mfra->GetChildren().FirstItem(); item; item = item->GetNext()) {
            if (item->GetData()->GetType() == AP4_ATOM_TYPE_TFRA) {
                AP4_TfraAtom* tfra_ = (AP4_TfraAtom*)item->GetData();
                if (tfra_->GetTrackId() == m_Trackers[t]->m_Track->GetId()) {
                    tfra = tfra_;
                    break;
                }
            }
        }
        // every tracked track must have a tfra entry, or we cannot seek
        if (tfra == NULL) {
            return AP4_ERROR_NOT_SUPPORTED;
        }

        AP4_Array<AP4_TfraAtom::Entry>& entries = tfra->GetEntries();

        // convert the requested time into this track's media timescale
        AP4_UI64 media_time = AP4_ConvertTime(time_ms, 1000, m_Trackers[t]->m_Track->GetMediaTimeScale());

        // find the last entry at or before the requested time
        int entry = -1;
        for (int i=0; i<(int)entries.ItemCount(); i++) {
            if (entries[i].m_Time > media_time) break;
            entry = i;
        }
        if (entry >= 0) {
            if (best_entry == -1) {
                best_entry = entry;
            } else if (entries[entry].m_MoofOffset < entries[best_entry].m_MoofOffset) {
                // NOTE(review): best_entry may have been selected from a
                // *different* track's entry array in a previous iteration,
                // yet it is compared against (and indexed into) the current
                // track's `entries` here — confirm this cross-track indexing
                // is intentional
                best_entry = entry;
            }

            // update our position
            if (best_entry >= 0) {
                if (actual_time_ms) {
                    // report the actual time we found (in milliseconds)
                    *actual_time_ms = (AP4_UI32)AP4_ConvertTime(entries[best_entry].m_Time, m_Trackers[t]->m_Track->GetMediaTimeScale(), 1000);
                }
                m_NextFragmentPosition = entries[best_entry].m_MoofOffset;
            }
        }
    }

    // check that we found something
    if (best_entry == -1) {
        return AP4_FAILURE;
    }

    // flush any queued samples
    FlushQueues();

    // reset tracker states so the next read starts from the new fragment
    for (unsigned int i=0; i<m_Trackers.ItemCount(); i++) {
        delete m_Trackers[i]->m_SampleTable;
        delete m_Trackers[i]->m_NextSample;
        m_Trackers[i]->m_SampleTable = NULL;
        m_Trackers[i]->m_NextSample = NULL;
        m_Trackers[i]->m_NextSampleIndex = 0;
        m_Trackers[i]->m_Eos = false;
    }

    return AP4_SUCCESS;
}
/*----------------------------------------------------------------------
|   Fragment
|
|   Convert the input file into a fragmented MP4 written to
|   output_stream. One fragment per track is emitted at a time, always
|   for the track with the smallest current DTS; video fragments are cut
|   at sync samples once the per-track target duration (derived from
|   fragment_duration, in milliseconds) is reached. An 'mfra'/'mfro'
|   random access index is appended at the end. If timescale is
|   non-zero, all tracks are re-timed to that timescale.
+---------------------------------------------------------------------*/
static void
Fragment(AP4_File&       input_file,
         AP4_ByteStream& output_stream,
         unsigned int    fragment_duration,
         AP4_UI32        timescale)
{
    AP4_Result result;

    AP4_Movie* input_movie = input_file.GetMovie();
    if (input_movie == NULL) {
        fprintf(stderr, "ERROR: no moov found in the input file\n");
        return;
    }

    // create the output file object
    AP4_Movie* output_movie = new AP4_Movie(1000);

    // create an mvex container
    AP4_ContainerAtom* mvex = new AP4_ContainerAtom(AP4_ATOM_TYPE_MVEX);
    AP4_MehdAtom*      mehd = new AP4_MehdAtom(0);
    mvex->AddChild(mehd);

    // create a cursor list to keep track of the tracks we will read from
    AP4_Array<TrackCursor*> cursors;

    // add an output track for each track in the input file
    for (AP4_List<AP4_Track>::Item* track_item = input_movie->GetTracks().FirstItem();
                                    track_item;
                                    track_item = track_item->GetNext()) {
        AP4_Track* track = track_item->GetData();

        TrackCursor* cursor = new TrackCursor();
        cursor->m_TrackId = track->GetId();
        cursor->m_Tfra->SetTrackId(track->GetId());
        cursors.Append(cursor);

        // create a sample table (with no samples) to hold the sample description
        // (descriptions are added without ownership transfer: `false`)
        AP4_SyntheticSampleTable* sample_table = new AP4_SyntheticSampleTable();
        for (unsigned int i=0; i<track->GetSampleDescriptionCount(); i++) {
            AP4_SampleDescription* sample_description = track->GetSampleDescription(i);
            sample_table->AddSampleDescription(sample_description, false);
        }

        // create the track
        AP4_Track* output_track = new AP4_Track(track->GetType(),
                                                sample_table,
                                                cursor->m_TrackId,
                                                timescale?timescale:1000,
                                                AP4_ConvertTime(track->GetDuration(),
                                                                input_movie->GetTimeScale(),
                                                                timescale?timescale:1000),
                                                timescale?timescale:track->GetMediaTimeScale(),
                                                0,//track->GetMediaDuration(),
                                                track->GetTrackLanguage(),
                                                track->GetWidth(),
                                                track->GetHeight());
        output_movie->AddTrack(output_track);

        result = cursor->SetTrack(track);
        if (AP4_FAILED(result)) {
            fprintf(stderr, "ERROR: failed to read sample (%d)\n", result);
            return;
        }

        // add a trex entry to the mvex container
        AP4_TrexAtom* trex = new AP4_TrexAtom(cursor->m_TrackId, 1, 0, 0, 0);
        mvex->AddChild(trex);
    }

    if (cursors.ItemCount() == 0) {
        fprintf(stderr, "ERROR: no track found\n");
        return;
    }

    // compute the per-track target duration (in media timescale units);
    // video tracks aim slightly below the requested duration so that the
    // next sync sample does not overshoot it
    for (unsigned int i=0; i<cursors.ItemCount(); i++) {
        if (cursors[i]->m_Track->GetType() == AP4_Track::TYPE_VIDEO) {
            cursors[i]->m_TargetDuration = AP4_ConvertTime(fragment_duration>AP4_FRAGMENTER_FRAGMENT_DURATION_TOLERANCE ? fragment_duration-AP4_FRAGMENTER_FRAGMENT_DURATION_TOLERANCE : 0, 1000, cursors[i]->m_Track->GetMediaTimeScale());
        } else {
            cursors[i]->m_TargetDuration = AP4_ConvertTime(fragment_duration, 1000, cursors[i]->m_Track->GetMediaTimeScale());
        }
    }

    // update the mehd duration
    mehd->SetDuration(output_movie->GetDuration());

    // add the mvex container to the moov container
    output_movie->GetMoovAtom()->AddChild(mvex);

    // write the ftyp atom
    AP4_FtypAtom* ftyp = input_file.GetFileType();
    if (ftyp) {
        ftyp->Write(output_stream);
    }

    // write the moov atom
    output_movie->GetMoovAtom()->Write(output_stream);

    // write all the fragments
    unsigned int sequence_number = 1;
    for(;;) {
        // select the next track to read from: the non-EOS cursor with the
        // smallest DTS (expressed in a common base timescale)
        TrackCursor* cursor = NULL;
        AP4_UI64 min_dts = (AP4_UI64)(-1);
        for (unsigned int i=0; i<cursors.ItemCount(); i++) {
            if (cursors[i]->m_Eos) continue;
            AP4_UI64 dts = AP4_ConvertTime(cursors[i]->m_Sample.GetDts(),
                                           cursors[i]->m_Track->GetMediaTimeScale(),
                                           AP4_FRAGMENTER_BASE_TIMESCALE);
            if (dts < min_dts) {
                min_dts = dts;
                cursor = cursors[i];
            }
        }
        if (cursor == NULL) break; // all done

        // compute the target end for the segment
        cursor->m_EndDts = cursor->m_Sample.GetDts()+cursor->m_TargetDuration;

        // emit a fragment for the selected track
        if (Options.verbosity > 0) {
            printf("fragment: track ID %d ", cursor->m_Track->GetId());
        }

        // remember the time and position of this fragment (for the tfra index)
        AP4_Position moof_offset = 0;
        output_stream.Tell(moof_offset);
        cursor->m_Tfra->AddEntry(cursor->m_Timestamp, moof_offset);

        // decide which sample description index to use
        // (this is not very sophisticated, we only look at the sample description
        // index of the first sample in the group, which may not be correct. This
        // should be fixed later)
        unsigned int sample_desc_index = cursor->m_Sample.GetDescriptionIndex();
        unsigned int tfhd_flags = AP4_TFHD_FLAG_DEFAULT_BASE_IS_MOOF;
        if (sample_desc_index > 0) {
            tfhd_flags |= AP4_TFHD_FLAG_SAMPLE_DESCRIPTION_INDEX_PRESENT;
        }
        if (cursor->m_Track->GetType() == AP4_Track::TYPE_VIDEO) {
            tfhd_flags |= AP4_TFHD_FLAG_DEFAULT_SAMPLE_FLAGS_PRESENT;
        }

        // setup the moof structure
        AP4_ContainerAtom* moof = new AP4_ContainerAtom(AP4_ATOM_TYPE_MOOF);
        AP4_MfhdAtom* mfhd = new AP4_MfhdAtom(sequence_number++);
        moof->AddChild(mfhd);
        AP4_ContainerAtom* traf = new AP4_ContainerAtom(AP4_ATOM_TYPE_TRAF);
        AP4_TfhdAtom* tfhd = new AP4_TfhdAtom(tfhd_flags, cursor->m_TrackId, 0, sample_desc_index+1, 0, 0, 0);
        if (tfhd_flags & AP4_TFHD_FLAG_DEFAULT_SAMPLE_FLAGS_PRESENT) {
            tfhd->SetDefaultSampleFlags(0x1010000); // sample_is_non_sync_sample=1, sample_depends_on=1 (not I frame)
        }
        traf->AddChild(tfhd);
        AP4_TfdtAtom* tfdt = new AP4_TfdtAtom(1, cursor->m_Timestamp);
        traf->AddChild(tfdt);
        AP4_UI32 trun_flags = AP4_TRUN_FLAG_DATA_OFFSET_PRESENT |
                              AP4_TRUN_FLAG_SAMPLE_DURATION_PRESENT |
                              AP4_TRUN_FLAG_SAMPLE_SIZE_PRESENT;
        AP4_UI32 first_sample_flags = 0;
        if (cursor->m_Track->GetType() == AP4_Track::TYPE_VIDEO) {
            trun_flags |= AP4_TRUN_FLAG_FIRST_SAMPLE_FLAGS_PRESENT;
            first_sample_flags = 0x2000000; // sample_depends_on=2 (I frame)
        }
        AP4_TrunAtom* trun = new AP4_TrunAtom(trun_flags, 0, first_sample_flags);
        traf->AddChild(trun);
        moof->AddChild(traf);

        // decide which samples go in this fragment
        AP4_Array<AP4_UI32> sample_indexes;
        unsigned int sample_count = 0;
        AP4_Array<AP4_TrunAtom::Entry> trun_entries;
        AP4_UI32 mdat_size = AP4_ATOM_HEADER_SIZE;
        for (;;) {
            // if we have one non-zero CTS delta, we'll need to express it
            if (cursor->m_Sample.GetCtsDelta()) {
                trun->SetFlags(trun->GetFlags() | AP4_TRUN_FLAG_SAMPLE_COMPOSITION_TIME_OFFSET_PRESENT);
            }

            // add one sample (durations/CTS offsets rescaled if a forced timescale was given)
            trun_entries.SetItemCount(sample_count+1);
            AP4_TrunAtom::Entry& trun_entry = trun_entries[sample_count];
            trun_entry.sample_duration = timescale?
                (AP4_UI32)AP4_ConvertTime(cursor->m_Sample.GetDuration(), cursor->m_Track->GetMediaTimeScale(), timescale):
                cursor->m_Sample.GetDuration();
            trun_entry.sample_size = cursor->m_Sample.GetSize();
            trun_entry.sample_composition_time_offset = timescale?
                (AP4_UI32)AP4_ConvertTime(cursor->m_Sample.GetCtsDelta(), cursor->m_Track->GetMediaTimeScale(), timescale):
                cursor->m_Sample.GetCtsDelta();

            sample_indexes.SetItemCount(sample_count+1);
            sample_indexes[sample_count] = cursor->m_SampleIndex;
            mdat_size += trun_entry.sample_size;

            // next sample
            cursor->m_Timestamp += trun_entry.sample_duration;
            cursor->m_SampleIndex++;
            sample_count++;
            if (cursor->m_SampleIndex >= cursor->m_Track->GetSampleCount()) {
                // no more samples: mark EOS, and park the cursor's sample at the
                // end-of-track DTS so the min-DTS selection above still works
                cursor->m_Eos = true;
                AP4_UI64 end_dts = cursor->m_Sample.GetDts()+cursor->m_Sample.GetDuration();
                cursor->m_Sample.Reset();
                cursor->m_Sample.SetDts(end_dts);
                break;
            }
            result = cursor->m_Track->GetSample(cursor->m_SampleIndex, cursor->m_Sample);
            if (AP4_FAILED(result)) {
                // read failure is treated like end-of-track
                cursor->m_Eos = true;
                AP4_UI64 end_dts = cursor->m_Sample.GetDts()+cursor->m_Sample.GetDuration();
                cursor->m_Sample.Reset();
                cursor->m_Sample.SetDts(end_dts);
                break;
            }
            // only cut the fragment at a sync sample past the target end
            if (cursor->m_Sample.IsSync()) {
                if (cursor->m_Sample.GetDts() >= cursor->m_EndDts) {
                    break; // done with this segment
                }
            }
        }
        if (Options.verbosity) {
            printf(" %d samples\n", sample_count);
        }

        // update moof and children
        trun->SetEntries(trun_entries);
        trun->SetDataOffset((AP4_UI32)moof->GetSize()+AP4_ATOM_HEADER_SIZE);

        // write moof
        moof->Write(output_stream);

        // write mdat
        output_stream.WriteUI32(mdat_size);
        output_stream.WriteUI32(AP4_ATOM_TYPE_MDAT);
        AP4_Sample     sample;
        AP4_DataBuffer sample_data;
        for (unsigned int i=0; i<sample_indexes.ItemCount(); i++) {
            result = cursor->m_Track->ReadSample(sample_indexes[i], sample, sample_data);
            if (AP4_FAILED(result)) {
                fprintf(stderr, "ERROR: failed to read sample %d (%d)\n", sample_indexes[i], result);
                return;
            }
            result = output_stream.Write(sample_data.GetData(), sample_data.GetDataSize());
            if (AP4_FAILED(result)) {
                fprintf(stderr, "ERROR: failed to write sample data (%d)\n", result);
                return;
            }
        }

        // cleanup
        delete moof;
    }

    // create an mfra container and write out the index
    // (the tfra atoms are handed over to the mfra container, which
    // deletes them; the cursors' pointers are cleared accordingly)
    AP4_ContainerAtom mfra(AP4_ATOM_TYPE_MFRA);
    for (unsigned int i=0; i<cursors.ItemCount(); i++) {
        mfra.AddChild(cursors[i]->m_Tfra);
        cursors[i]->m_Tfra = NULL;
    }
    AP4_MfroAtom* mfro = new AP4_MfroAtom((AP4_UI32)mfra.GetSize()+16);
    mfra.AddChild(mfro);
    result = mfra.Write(output_stream);
    if (AP4_FAILED(result)) {
        fprintf(stderr, "ERROR: failed to write 'mfra' (%d)\n", result);
        return;
    }

    // cleanup
    for (unsigned int i=0; i<cursors.ItemCount(); i++) {
        delete cursors[i];
    }
    delete output_movie;
}