/*
 * mp4_split: translate the requested [start, end) range (seconds, taken from
 * the request options) into per-track sample index ranges.
 *
 * mp4_context       - parsed MP4 file context; its moov index is (re)built here.
 * trak_sample_start - out: first sample index, one entry per trak.
 * trak_sample_end   - out: one-past-last sample index, one entry per trak.
 * options           - requested start/end times and the 'exact' flag.
 *
 * Returns the result of get_aligned_start_and_end() (presumably nonzero on
 * success -- TODO confirm against its definition).
 */
extern int mp4_split(struct mp4_context_t* mp4_context,
                     unsigned int* trak_sample_start,
                     unsigned int* trak_sample_end,
                     mp4_split_options_t const* options)
{
  struct moov_t const* moov;
  long moov_time_scale;
  unsigned int range_begin;
  unsigned int range_end;
  float const begin_seconds = options->start;
  float const end_seconds = options->end;
  int result;

  moov_build_index(mp4_context, mp4_context->moov);

  moov = mp4_context->moov;
  moov_time_scale = moov->mvhd_->timescale_;

  /* Convert seconds to movie-timescale units, rounding to nearest. */
  range_begin = (unsigned int)(begin_seconds * moov_time_scale + 0.5f);
  range_end   = (unsigned int)(end_seconds * moov_time_scale + 0.5f);

  /* For every trak, convert time to sample indices (time-to-sample) and
     adjust the start sample to a keyframe. */
  result = get_aligned_start_and_end(mp4_context, range_begin, range_end,
                                     trak_sample_start, trak_sample_end);

  if(options->exact)
  {
    /* Audio needs no keyframe alignment: reset each audio trak's start
       sample to the sample at the exact requested start time, regardless
       of the keyframe chosen above. */
    unsigned int track_idx;
    for(track_idx = 0; track_idx != moov->tracks_; ++track_idx)
    {
      struct trak_t* trak = moov->traks_[track_idx];
      long trak_time_scale;
      struct stts_t* stts;
      unsigned int exact_start_sample;

      /* only 'soun' (audio) handlers are rewritten */
      if(trak->mdia_->hdlr_->handler_type_ != FOURCC('s','o','u','n'))
      {
        continue;
      }

      trak_time_scale = trak->mdia_->mdhd_->timescale_;
      stts = trak->mdia_->minf_->stbl_->stts_;
      exact_start_sample = stts_get_sample(stts,
          moov_time_to_trak_time((options->start * moov_time_scale),
                                 moov_time_scale, trak_time_scale));
      MP4_WARNING("FFGOP: AUDIO REWRITING trak_sample_start[%i]: %u => %u\n",
                  track_idx, trak_sample_start[track_idx], exact_start_sample);
      trak_sample_start[track_idx] = exact_start_sample;
    }
  }

  return result;
}
/*
 * mp4_fragment_file: rewrite a progressive MP4 into a fragmented MP4 as a
 * list of output buckets: ftyp, a minimal moov (with mvex), one moof+mdat
 * pair per smooth-streaming fragment, a "woov" atom, and a trailing mfra.
 *
 * mp4_context - parsed source file context (moov index is built here).
 * buckets     - in/out: list of output buckets; new buckets appended at tail.
 *
 * Returns 0 if the moov index cannot be built, otherwise `result` (set to 1
 * at entry and never changed afterwards in this function).
 */
extern int mp4_fragment_file(struct mp4_context_t const* mp4_context,
                             struct bucket_t** buckets)
{
  uint64_t filepos = 0;   /* running byte offset of the output stream */
  int result = 1;
  moov_t* moov = mp4_context->moov;
  moov_t* fmoov;          /* the minimal moov written to the fragmented output */
  struct woov_t* woov = NULL;
  mfra_t* mfra;

  if(!moov_build_index(mp4_context, mp4_context->moov))
  {
    return 0;
  }

  /* Start with the ftyp: major brand 'avc1', minor version 0, compatible
     brands 'isom', 'iso2', 'frag'. Total atom size is fixed at 28 bytes. */
  {
    unsigned char ftyp[28];
    unsigned char* buffer = ftyp;
    buffer = write_32(buffer, 28);
    buffer = write_32(buffer, FOURCC('f', 't', 'y', 'p'));
    buffer = write_32(buffer, FOURCC('a', 'v', 'c', '1')); // major_brand
    buffer = write_32(buffer, 0);                          // minor_version
    buffer = write_32(buffer, FOURCC('i', 's', 'o', 'm')); // compatible_brands
    buffer = write_32(buffer, FOURCC('i', 's', 'o', '2'));
    buffer = write_32(buffer, FOURCC('f', 'r', 'a', 'g'));
    bucket_insert_tail(buckets, bucket_init_memory(ftyp, sizeof(ftyp)));
    filepos += sizeof(ftyp);
  }

  /* Reorder the traks by media type (audio first, then video, then others)
     with a stable bubble sort, and renumber track ids 1..N accordingly.
     NOTE(review): no bounds check of moov->tracks_ against MAX_TRACKS here;
     presumably guaranteed by the parser -- verify. */
  {
    uint32_t i;
    struct trak_weight_t{ int w; void* v; } wtrack[MAX_TRACKS];
    for(i = 0; i < moov->tracks_; i++)
    {
      if(moov->traks_[i]->mdia_->hdlr_->handler_type_ == FOURCC('s', 'o', 'u', 'n'))
      { wtrack[i].w = 1; wtrack[i].v = moov->traks_[i]; }      // audio
      else if(moov->traks_[i]->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e'))
      { wtrack[i].w = 2; wtrack[i].v = moov->traks_[i]; }      // video
      else
      { wtrack[i].w = 3; wtrack[i].v = moov->traks_[i]; }      // everything else
    }
    for (i = 1; i < moov->tracks_; ++i)
    {
      unsigned int j;
      for (j = moov->tracks_ - 1; j >= i; j--)
      {
        if(wtrack[j].w < wtrack[j-1].w)
        {
          struct trak_weight_t t = wtrack[j];
          wtrack[j] = wtrack[j-1];
          wtrack[j-1] = t;
        }
      }
    }
    for (i = 0; i < moov->tracks_; ++i)
    {
      moov->traks_[i] = wtrack[i].v;
      moov->traks_[i]->tkhd_->track_id_ = i + 1;  // track ids are 1-based
    }
    moov->mvhd_->next_track_id_ = i + 1;
  }

  // A fragmented MPEG4 file starts with a MOOV atom with only the mandatory
  // atoms
  fmoov = moov_init();
  {
    unsigned int i;
    mvex_t* mvex = mvex_init();
    fmoov->mvhd_ = mvhd_copy(moov->mvhd_);
    fmoov->mvhd_->duration_ = 0;   /* durations recomputed per trak below */
    fmoov->tracks_ = moov->tracks_;
    fmoov->mvex_ = mvex;
    for(i = 0; i != moov->tracks_; ++i)
    {
      unsigned int s;
      trak_t* trak = moov->traks_[i];
      trak_t* ftrak = trak_init();
      mdia_t* mdia = trak->mdia_;
      mdia_t* fmdia = mdia_init();
      minf_t* minf = mdia->minf_;
      minf_t* fminf = minf_init();
      stbl_t* stbl = minf->stbl_;
      stbl_t* fstbl = stbl_init();
      fmoov->traks_[i] = ftrak;
      ftrak->tkhd_ = tkhd_copy(trak->tkhd_);
      ftrak->tkhd_->duration_ = 0;
      ftrak->mdia_ = fmdia;
      /* Deep-copy the sample table (one extra sentinel entry at the end,
         used below as the one-past-last pts). */
      ftrak->samples_size_ = trak->samples_size_;
      ftrak->samples_ = (samples_t*)
          malloc((trak->samples_size_ + 1) * sizeof(samples_t));
      memcpy(ftrak->samples_, trak->samples_,
             (trak->samples_size_ + 1) * sizeof(samples_t));
      ftrak->smoothes_size_ = trak->smoothes_size_;
      /* NOTE(review): allocation is sized by samples_size_ but the memcpy
         copies smoothes_size_ + 1 entries -- heap overflow if
         smoothes_size_ > samples_size_; presumably smoothes_size_ <=
         samples_size_ always holds, but verify. */
      ftrak->smoothes_ = (struct smooth_t*)
          malloc((trak->samples_size_ + 1) * sizeof(struct smooth_t));
      memcpy(ftrak->smoothes_, trak->smoothes_,
             (trak->smoothes_size_ + 1) * sizeof(struct smooth_t));
      fmdia->mdhd_ = mdhd_copy(mdia->mdhd_);
      // convert trak's timescale and duration
      fmdia->mdhd_->version_ = 1;            /* version 1 = 64-bit times */
      fmdia->mdhd_->timescale_ = 10000000;   /* fixed 10 MHz output timescale */
      fmdia->mdhd_->duration_ = 0;
      // trak_time_to_moov_time(fmdia->mdhd_->duration_,
      //   fmdia->mdhd_->timescale_, mdia->mdhd_->timescale_);
      fmdia->hdlr_ = hdlr_copy(mdia->hdlr_);
      fmdia->minf_ = fminf;
      /* smhd/vmhd are present only for audio/video traks respectively */
      fminf->smhd_ = minf->smhd_ == NULL ? NULL : smhd_copy(minf->smhd_);
      fminf->vmhd_ = minf->vmhd_ == NULL ? NULL : vmhd_copy(minf->vmhd_);
      fminf->dinf_ = dinf_copy(minf->dinf_);
      fminf->stbl_ = fstbl;
      /* Fragmented files carry empty sample tables; the real sample data
         lives in the moof/trun boxes. Only stsd is copied over. */
      fstbl->stts_ = stts_init();
      fstbl->ctts_ = ctts_init();
      fstbl->stsz_ = stsz_init();
      fstbl->stsc_ = stsc_init();
      fstbl->stco_ = stco_init();
      fstbl->stsd_ = stsd_copy(stbl->stsd_);
      /* Rescale pts/cto of every sample (including the sentinel) from the
         source trak timescale to the fixed output timescale. */
      for(s = 0; s != ftrak->samples_size_ + 1; ++s)
      {
        // SmoothStreaming uses a fixed 10000000 timescale
        ftrak->samples_[s].pts_ = trak_time_to_moov_time(
            ftrak->samples_[s].pts_,
            ftrak->mdia_->mdhd_->timescale_, trak->mdia_->mdhd_->timescale_);
        ftrak->samples_[s].cto_ = (unsigned int)(trak_time_to_moov_time(
            ftrak->samples_[s].cto_,
            ftrak->mdia_->mdhd_->timescale_, trak->mdia_->mdhd_->timescale_));
      }
      {
        // update trak duration
        samples_t const* first = (samples_t const*)&ftrak->samples_[0];
        samples_t const* last =
            (samples_t const*)&ftrak->samples_[ftrak->samples_size_];
        ftrak->mdia_->mdhd_->duration_ = last->pts_ - first->pts_;
        ftrak->tkhd_->duration_ = trak_time_to_moov_time(
            ftrak->mdia_->mdhd_->duration_,
            fmoov->mvhd_->timescale_, ftrak->mdia_->mdhd_->timescale_);
        // update movie duration (movie duration = longest trak duration)
        if(ftrak->tkhd_->duration_ > fmoov->mvhd_->duration_)
        {
          fmoov->mvhd_->duration_ = ftrak->tkhd_->duration_;
        }
      }
      {
        /* One trex (track extends) per trak, as mandated for mvex. */
        trex_t* trex = trex_init();
        trex->track_id_ = trak->tkhd_->track_id_;
        trex->default_sample_description_index_ = 1;
        mvex->trexs_[mvex->tracks_] = trex;
        ++mvex->tracks_;
      }
    }
    {
      /* Serialize the minimal moov into the context's scratch buffer.
         NOTE(review): assumes moov_data is large enough for the written
         moov -- no size passed to moov_write; verify the buffer contract. */
      unsigned char* moov_data = mp4_context->moov_data;
      uint32_t moov_size = moov_write(fmoov, moov_data);
      bucket_insert_tail(buckets, bucket_init_memory(moov_data, moov_size));
      filepos += moov_size;
    }
  }

  woov = woov_init(mp4_context, fmoov);
  mfra = mfra_init();
  mfra->tracks_ = fmoov->tracks_;
  {
    unsigned int i;
    unsigned int tfra_entries = 0;  /* total entries across all tfra tables */
    for(i = 0; i != fmoov->tracks_; ++i)
    {
      trak_t const* trak = fmoov->traks_[i];
      struct tfra_t* tfra = tfra_init();
      mfra->tfras_[i] = tfra;
      tfra->version_ = 1;           /* 64-bit time/moof_offset fields */
      tfra->flags_ = 0;
      tfra->track_id_ = trak->tkhd_->track_id_;
      /* field lengths encode as (stored value + 1) bytes each */
      tfra->length_size_of_traf_num_ = 1;
      tfra->length_size_of_trun_num_ = 1;
      tfra->length_size_of_sample_num_ = 1;
      // count the number of smooth sync samples (nr of moofs)
      tfra->number_of_entry_ = 0;
      {
        unsigned int start;
        for(start = 0; start != trak->samples_size_; ++start)
        {
          {
            if(trak->samples_[start].is_smooth_ss_)
            {
              ++tfra->number_of_entry_;
            }
          }
        }
      }
      tfra->table_ = (tfra_table_t*)
          malloc(tfra->number_of_entry_ * sizeof(tfra_table_t));
      tfra_entries += tfra->number_of_entry_;
      // next track
    }
    {
      /* Emit one moof+mdat pair per fragment. The fragment count is taken
         from the first trak's smoothes table. */
      unsigned int tfra_index = 0;
      trak_t const* base_trak = fmoov->traks_[0];
      while(tfra_index != base_trak->smoothes_size_)
      {
        // insert moof bucket
        {
          moof_t* moof = moof_init();
          bucket_t* bucket = bucket_init(BUCKET_TYPE_MEMORY);
          bucket_insert_tail(buckets, bucket);
          // create moof and write samples
          moof_create(mp4_context, fmoov, woov, moof, mfra, filepos,
                      tfra_index + 1, buckets, 0 /* OUTPUT_FORMAT_MP4 */);
          // if(options->output_format == OUTPUT_FORMAT_MP4)
          {
            unsigned int samples_count = 0;
            unsigned char* moof_data = NULL;
            unsigned int moof_size = 0;
            for(i = 0; i < moof->tracks_; ++i)
              samples_count += moof->trafs_[i]->trun_->sample_count_;
            /* NOTE(review): 8192 + 12 bytes/sample is an assumed upper
               bound on the serialized moof size -- confirm against
               moof_write's per-sample output. */
            moof_data = (unsigned char*)malloc(8192 + (samples_count) * 12);
            moof_size = moof_write(moof, moof_data);
            // now that we know the size of the moof atom, we know where the mdat
            // will start. We patch the 'data_offset' field to skip the
            // moof atom and the mdat header.
            moof->trafs_[0]->trun_->data_offset_ =
                moof_size + ATOM_PREAMBLE_SIZE;
            moof_size = moof_write(moof, moof_data);  /* second pass: patched */
            bucket->buf_ = malloc(moof_size);
            bucket->size_ = moof_size;
            memcpy(bucket->buf_, moof_data, (size_t)bucket->size_);
            free(moof_data);
          }
          moof_exit(moof);
          // advance filepos for moof and mdat atom
          /* walk from the moof bucket to the list end (buckets appended by
             moof_create included), summing their sizes */
          while(*buckets != bucket)
          {
            filepos += bucket->size_;
            bucket = bucket->next_;
          }
        }
        // next fragment
        ++tfra_index;
      }
    }
    moov_exit(fmoov);
    {
      /* Write the woov atom and shift all chunk offsets by the number of
         bytes that precede the media data in the output file. */
      uint32_t woov_size = woov_write(mp4_context, woov, buckets);
      int offset = 0;
      offset += 28;                  // ftyp
      offset += woov_size;           // woov
      offset += ATOM_PREAMBLE_SIZE;  // mdat
      woov->mdat_size += 8;          // header;
      if(woov->mdat_size > UINT32_MAX)
      {
        /* 64-bit mdat: extra 8-byte largesize field */
        offset += 8;
        woov->mdat_size += 8;
      }
      for(i = 0; i != woov->moov->tracks_; ++i)
      {
        trak_t* trak = woov->moov->traks_[i];
        stco_shift_offsets_inplace(
            (unsigned char*)trak->mdia_->minf_->stbl_->stco_->stco_inplace_,
            (int)offset);
      }
      /* NOTE(review): woov_data is not declared anywhere in this function --
         presumably a file-scope buffer defined elsewhere; verify. If it is a
         pointer rather than an array, sizeof(woov_data) below would be the
         pointer size, not the buffer size -- confirm. */
      write_32(woov_data + 8, woov_size);
      write_64(woov_data + 12, woov->mdat_size);
      bucket_insert_tail(buckets,
                         bucket_init_memory(woov_data, sizeof(woov_data)));
      woov_exit(woov);
    }
    // Write the Movie Fragment Random Access (MFRA) atom
    {
      /* bucket_init_memory copies the data (see the stack ftyp above),
         so mfra_data is freed immediately after. */
      unsigned char* mfra_data =
          (unsigned char*)malloc(8192 + tfra_entries * 28);
      uint32_t mfra_size = mfra_write(mfra, mfra_data);
      bucket_insert_tail(buckets, bucket_init_memory(mfra_data, mfra_size));
      mfra_exit(mfra);
      free(mfra_data);
    }
  }
  return result;
}