/* Rebuilds the sample tables and durations of the "woov" (write-out moov)
 * from the samples accumulated during fragmentation, serializes it into a
 * new memory bucket appended to *buckets, and returns the serialized size.
 *
 * The atom is written as a 'moov' by moov_write() but its fourcc is then
 * patched to 'free' (see the commented-out 'moov' line below) — presumably
 * so that ordinary players skip it while it is still present in the output
 * for later use; TODO confirm intent against the consumer of this stream.
 *
 * NOTE(review): the 16 MiB buffer below is a fixed-size allocation with no
 * NULL check and no guard that moov_write() stays within it — assumes the
 * serialized moov always fits; verify against moov_write's requirements. */
static uint32_t woov_write( struct mp4_context_t const* mp4_context, struct woov_t* woov, struct bucket_t** buckets)
{
  unsigned int i;
  uint32_t woov_size=0;
  /* Reserve the bucket now so it lands in stream order; fill buf_/size_ last. */
  bucket_t* woov_bucket = bucket_init(BUCKET_TYPE_MEMORY);
  bucket_insert_tail(buckets, woov_bucket);
  for(i = 0; i != woov->moov->tracks_; ++i)
  {
    trak_t* trak = woov->moov->traks_[i];
    /* samples_ has samples_size_ + 1 entries; 'last' is the sentinel entry
       one past the final sample (its pts_ is the end timestamp). */
    samples_t const* first = (samples_t const*)&trak->samples_[0];
    samples_t const* last = (samples_t const*)&trak->samples_[trak->samples_size_];
    // info need to update: regenerate the four sample tables from the
    // accumulated sample list (the previous table pointers are replaced;
    // ownership/freeing of the old tables is handled elsewhere — TODO confirm).
    trak->mdia_->minf_->stbl_->stts_ = stts_create(mp4_context, first, last); //time-to-sample
    trak->mdia_->minf_->stbl_->ctts_ = ctts_create(mp4_context, first, last); //composition time-to-sample table
    trak->mdia_->minf_->stbl_->stsz_ = stsz_create(mp4_context, first, last); //sample sizes (framing)
    trak->mdia_->minf_->stbl_->stss_ = stss_create(mp4_context, first, last); //sync sample
    // update trak duration: media duration in the trak's own timescale,
    // tkhd duration converted to the movie timescale.
    trak->mdia_->mdhd_->duration_ = last->pts_ - first->pts_;
    trak->tkhd_->duration_ = trak_time_to_moov_time(trak->mdia_->mdhd_->duration_, woov->moov->mvhd_->timescale_, trak->mdia_->mdhd_->timescale_);
    // update movie duration: movie duration is the longest track duration.
    if(trak->tkhd_->duration_ > woov->moov->mvhd_->duration_)
    {
      woov->moov->mvhd_->duration_ = trak->tkhd_->duration_ ;
    }
  }
  /* Serialize the rebuilt moov into a fixed 16 MiB scratch buffer owned by
     the bucket. malloc result is not checked — see NOTE in header comment. */
  woov_bucket->buf_ = malloc( 16 * 1024 * 1024 );
  woov_bucket->size_ = woov_size = moov_write(woov->moov, (unsigned char*)woov_bucket->buf_);
  /* Re-tag the atom: bytes 4..7 hold the fourcc right after the 32-bit size. */
  //write_32((unsigned char*)woov_bucket->buf_ + 4, FOURCC('m', 'o', 'o', 'v'));
  write_32((unsigned char*)woov_bucket->buf_ + 4, FOURCC('f', 'r', 'e', 'e'));
  return woov_size;
}
/* Inserts 'bucket' at the head of the bucket list rooted at *head.
 *
 * Implementation detail: the list appears to be circular (tail insertion
 * links the new node before the current head), so appending at the tail
 * and then re-pointing *head at the new node makes it the logical head —
 * TODO confirm against bucket_insert_tail's linking, which is not visible
 * in this file. */
extern void bucket_insert_head(bucket_t** head, bucket_t* bucket)
{
  bucket_insert_tail(head, bucket);
  *head = bucket;
}
extern int output_mp4(struct mp4_context_t* mp4_context, unsigned int const* trak_sample_start, unsigned int const* trak_sample_end, struct bucket_t** buckets, struct mp4_split_options_t* options) { unsigned int i; uint64_t mdat_start = mp4_context->mdat_atom.start_; uint64_t mdat_size = mp4_context->mdat_atom.size_; int64_t offset; struct moov_t* moov = mp4_context->moov; // unsigned char* moov_data = mp4_context->moov_data; unsigned char* moov_data = (unsigned char*) malloc((size_t)mp4_context->moov_atom.size_ + ATOM_PREAMBLE_SIZE + 1024); uint64_t moov_size; long moov_time_scale = moov->mvhd_->timescale_; uint64_t skip_from_start = UINT64_MAX; uint64_t end_offset = 0; uint64_t moov_duration = 0; #if 1 uint64_t new_mdat_start = 0; { static char const free_data[] = { 0x0, 0x0, 0x0, 42, 'f', 'r', 'e', 'e', 'v', 'i', 'd', 'e', 'o', ' ', 's', 'e', 'r', 'v', 'e', 'd', ' ', 'b', 'y', ' ', 'm', 'o', 'd', '_', 'h', '2', '6', '4', '_', 's', 't', 'r', 'e', 'a', 'm', 'i', 'n', 'g' }; uint32_t size_of_header = (uint32_t)mp4_context->ftyp_atom.size_ + sizeof(free_data); unsigned char* buffer = (unsigned char*)malloc(size_of_header); if(mp4_context->ftyp_atom.size_) { fseeko(mp4_context->infile, mp4_context->ftyp_atom.start_, SEEK_SET); if(fread(buffer, (off_t)mp4_context->ftyp_atom.size_, 1, mp4_context->infile) != 1) { MP4_ERROR("%s", "Error reading ftyp atom\n"); free(buffer); return 0; } } // copy free data memcpy(buffer + mp4_context->ftyp_atom.size_, free_data, sizeof(free_data)); if(options->output_format == OUTPUT_FORMAT_MP4) { bucket_t* bucket = bucket_init_memory(buffer, size_of_header); bucket_insert_tail(buckets, bucket); } free(buffer); new_mdat_start += size_of_header; } // new_mdat_start += mp4_context->moov_atom.size_; #endif offset = new_mdat_start - mp4_context->mdat_atom.start_; // subtract old moov size // offset -= mp4_context->moov_atom.size_; for(i = 0; i != moov->tracks_; ++i) { struct trak_t* trak = moov->traks_[i]; struct stbl_t* stbl = 
trak->mdia_->minf_->stbl_; unsigned int start_sample = trak_sample_start[i]; unsigned int end_sample = trak_sample_end[i]; if (options->exact) trak_fast_forward_first_partial_GOP(mp4_context, options, trak, start_sample); trak_update_index(mp4_context, trak, start_sample, end_sample); if(trak->samples_size_ == 0) { MP4_WARNING("Trak %u contains no samples. Maybe a fragmented file?", i); return 1; } { uint64_t skip = trak->samples_[start_sample].pos_ - trak->samples_[0].pos_; if(skip < skip_from_start) skip_from_start = skip; MP4_INFO("Trak can skip %"PRIu64" bytes\n", skip); if(end_sample != trak->samples_size_) { uint64_t end_pos = trak->samples_[end_sample].pos_; if(end_pos > end_offset) end_offset = end_pos; MP4_INFO("New endpos=%"PRIu64"\n", end_pos); MP4_INFO("Trak can skip %"PRIu64" bytes at end\n", mdat_start + mdat_size - end_offset); } } { // fixup trak (duration) uint64_t trak_duration = stts_get_duration(stbl->stts_); long trak_time_scale = trak->mdia_->mdhd_->timescale_; { uint64_t duration = trak_time_to_moov_time(trak_duration, moov_time_scale, trak_time_scale); trak->mdia_->mdhd_->duration_= trak_duration; trak->tkhd_->duration_ = duration; MP4_INFO("trak: new_duration=%"PRIu64"\n", duration); if(duration > moov_duration) moov_duration = duration; } } // MP4_INFO("stco.size=%d, ", read_int32(stbl->stco_ + 4)); // MP4_INFO("stts.size=%d samples=%d\n", read_int32(stbl->stts_ + 4), stts_get_samples(stbl->stts_)); // MP4_INFO("stsz.size=%d\n", read_int32(stbl->stsz_ + 8)); // MP4_INFO("stsc.samples=%d\n", stsc_get_samples(stbl->stsc_)); } moov->mvhd_->duration_ = moov_duration; MP4_INFO("moov: new_duration=%.2f seconds\n", moov_duration / (float)moov_time_scale); // subtract bytes we skip at the front of the mdat atom offset -= skip_from_start; MP4_INFO("%s", "moov: writing header\n"); moov_write(moov, moov_data); moov_size = read_32(moov_data); // add new moov size offset += moov_size; MP4_INFO("shifting offsets by %"PRId64"\n", offset); 
moov_shift_offsets_inplace(moov, offset); // traffic shaping: create offsets for each second create_traffic_shaping(moov, trak_sample_start, trak_sample_end, offset, options); #ifdef COMPRESS_MOOV_ATOM if(!options->client_is_flash) { compress_moov(mp4_context, moov, moov_data, &moov_size); } #endif if(end_offset != 0) { MP4_INFO("mdat_size=%"PRId64" end_offset=%"PRId64"\n", mdat_size, end_offset); mdat_size = end_offset - mdat_start; } mdat_start += skip_from_start; mdat_size -= skip_from_start; MP4_INFO("mdat_bucket(%"PRId64", %"PRId64")\n", mdat_start, mdat_size); bucket_insert_tail(buckets, bucket_init_memory(moov_data, moov_size)); free(moov_data); { struct mp4_atom_t mdat_atom; mdat_atom.type_ = FOURCC('m', 'd', 'a', 't'); mdat_atom.short_size_ = 0; // TODO: use original small/wide mdat box if(options->adaptive) { // empty mdat atom mdat_atom.size_ = ATOM_PREAMBLE_SIZE; } else { mdat_atom.size_ = mdat_size; } { unsigned char buffer[32]; int mdat_header_size = mp4_atom_write_header(buffer, &mdat_atom); bucket_insert_tail(buckets, bucket_init_memory(buffer, mdat_header_size)); if(mdat_atom.size_ - mdat_header_size) { bucket_insert_tail(buckets, bucket_init_file(mdat_start + mdat_header_size, mdat_atom.size_ - mdat_header_size)); } } } return 1; }
/* Converts a progressive MP4 into a fragmented MP4 stream of buckets:
 * ftyp, fragment-style moov (mvex/trex, empty sample tables), a sequence
 * of moof+mdat fragments, a trailing re-tagged moov ("woov", written as a
 * 'free' atom by woov_write), and finally the mfra random-access index.
 *
 * Side effect: reorders moov->traks_ (sound first, then video, then other)
 * and renumbers track ids accordingly.
 *
 * Returns 1 on success, 0 if the sample index cannot be built.
 *
 * NOTE(review): 'woov_data' used near the end is not declared in this
 * function — presumably a file-scope buffer holding a header whose size and
 * mdat-size fields are patched here; verify against its definition. */
extern int mp4_fragment_file(struct mp4_context_t const* mp4_context, struct bucket_t** buckets)
{
  uint64_t filepos = 0; /* running byte offset of the output stream */
  int result = 1;
  moov_t* moov = mp4_context->moov;
  moov_t* fmoov;
  struct woov_t* woov = NULL;
  mfra_t* mfra;

  if(!moov_build_index(mp4_context, mp4_context->moov))
  {
    return 0;
  }

  // Start with the ftyp
  {
    unsigned char ftyp[28];
    unsigned char* buffer = ftyp;
    buffer = write_32(buffer, 28);
    buffer = write_32(buffer, FOURCC('f', 't', 'y', 'p'));
    buffer = write_32(buffer, FOURCC('a', 'v', 'c', '1')); // major_brand
    buffer = write_32(buffer, 0);                          // minor_version
    buffer = write_32(buffer, FOURCC('i', 's', 'o', 'm')); // compatible_brands
    buffer = write_32(buffer, FOURCC('i', 's', 'o', '2'));
    buffer = write_32(buffer, FOURCC('f', 'r', 'a', 'g'));
    bucket_insert_tail(buckets, bucket_init_memory(ftyp, sizeof(ftyp)));
    filepos += sizeof(ftyp);
  }

  {
    /* Stable-ish reorder of tracks by weight (sound=1, video=2, other=3)
       using a bubble sort, then renumber track ids 1..N in the new order. */
    uint32_t i;
    struct trak_weight_t{ int w; void* v; }wtrack[MAX_TRACKS];
    for(i = 0; i < moov->tracks_; i++)
    {
      if(moov->traks_[i]->mdia_->hdlr_->handler_type_ == FOURCC('s', 'o', 'u', 'n'))
      {wtrack[i].w=1;wtrack[i].v=moov->traks_[i];}
      else if(moov->traks_[i]->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e'))
      {wtrack[i].w=2;wtrack[i].v=moov->traks_[i];}
      else
      {wtrack[i].w=3;wtrack[i].v=moov->traks_[i];}
    }
    for (i = 1; i < moov->tracks_; ++i)
    {
      unsigned int j;
      /* j >= i >= 1, so the unsigned decrement never wraps. */
      for (j= moov->tracks_ - 1; j>=i;j--)
      {
        if(wtrack[j].w < wtrack[j-1].w)
        {
          struct trak_weight_t t = wtrack[j];
          wtrack[j] = wtrack[j-1];
          wtrack[j-1] = t;
        }
      }
    }
    for (i = 0; i < moov->tracks_; ++i)
    {
      moov->traks_[i] = wtrack[i].v;
      moov->traks_[i]->tkhd_->track_id_=i+1;
    }
    moov->mvhd_->next_track_id_=i+1;
  }

  // A fragmented MPEG4 file starts with a MOOV atom with only the mandatory
  // atoms
  fmoov = moov_init();
  {
    unsigned int i;
    mvex_t* mvex = mvex_init();
    fmoov->mvhd_ = mvhd_copy(moov->mvhd_);
    fmoov->mvhd_->duration_ = 0;
    fmoov->tracks_ = moov->tracks_;
    fmoov->mvex_ = mvex;
    for(i = 0; i != moov->tracks_; ++i)
    {
      unsigned int s;
      trak_t* trak = moov->traks_[i];
      trak_t* ftrak = trak_init();
      mdia_t* mdia = trak->mdia_;
      mdia_t* fmdia = mdia_init();
      minf_t* minf = mdia->minf_;
      minf_t* fminf = minf_init();
      stbl_t* stbl = minf->stbl_;
      stbl_t* fstbl = stbl_init();
      fmoov->traks_[i] = ftrak;
      ftrak->tkhd_ = tkhd_copy(trak->tkhd_);
      ftrak->tkhd_->duration_ = 0;
      ftrak->mdia_ = fmdia;
      /* Deep-copy the sample index (samples_size_ + 1 entries: the extra
         entry is the end sentinel). NOTE(review): the smoothes_ allocation
         below is sized by samples_size_ but copied by smoothes_size_ —
         assumes smoothes_size_ <= samples_size_; verify. */
      ftrak->samples_size_ = trak->samples_size_;
      ftrak->samples_ = (samples_t*)
        malloc((trak->samples_size_ + 1) * sizeof(samples_t));
      memcpy(ftrak->samples_, trak->samples_, (trak->samples_size_ + 1) * sizeof(samples_t));
      ftrak->smoothes_size_ = trak->smoothes_size_;
      ftrak->smoothes_ = (struct smooth_t*)
        malloc((trak->samples_size_ + 1) * sizeof(struct smooth_t));
      memcpy(ftrak->smoothes_, trak->smoothes_, (trak->smoothes_size_ + 1) * sizeof(struct smooth_t));
      fmdia->mdhd_ = mdhd_copy(mdia->mdhd_);
      // convert trak's timescale and duration
      fmdia->mdhd_->version_ = 1;
      fmdia->mdhd_->timescale_ = 10000000;
      fmdia->mdhd_->duration_ = 0;
      // trak_time_to_moov_time(fmdia->mdhd_->duration_,
      //   fmdia->mdhd_->timescale_, mdia->mdhd_->timescale_);
      fmdia->hdlr_ = hdlr_copy(mdia->hdlr_);
      fmdia->minf_ = fminf;
      fminf->smhd_ = minf->smhd_ == NULL ? NULL : smhd_copy(minf->smhd_);
      fminf->vmhd_ = minf->vmhd_ == NULL ? NULL : vmhd_copy(minf->vmhd_);
      fminf->dinf_ = dinf_copy(minf->dinf_);
      fminf->stbl_ = fstbl;
      /* Fragmented moov carries empty sample tables; real samples live in
         the moof fragments. */
      fstbl->stts_ = stts_init();
      fstbl->ctts_ = ctts_init();
      fstbl->stsz_ = stsz_init();
      fstbl->stsc_ = stsc_init();
      fstbl->stco_ = stco_init();
      fstbl->stsd_ = stsd_copy(stbl->stsd_);
      for(s = 0; s != ftrak->samples_size_ + 1; ++s)
      {
        // SmoothStreaming uses a fixed 10000000 timescale
        ftrak->samples_[s].pts_ = trak_time_to_moov_time(
          ftrak->samples_[s].pts_, ftrak->mdia_->mdhd_->timescale_, trak->mdia_->mdhd_->timescale_);
        ftrak->samples_[s].cto_ = (unsigned int)(trak_time_to_moov_time(
          ftrak->samples_[s].cto_, ftrak->mdia_->mdhd_->timescale_, trak->mdia_->mdhd_->timescale_));
      }
      {
        // update trak duration
        samples_t const* first = (samples_t const*)&ftrak->samples_[0];
        samples_t const* last = (samples_t const*)&ftrak->samples_[ftrak->samples_size_];
        ftrak->mdia_->mdhd_->duration_ = last->pts_ - first->pts_;
        ftrak->tkhd_->duration_ = trak_time_to_moov_time(ftrak->mdia_->mdhd_->duration_, fmoov->mvhd_->timescale_, ftrak->mdia_->mdhd_->timescale_);
        // update movie duration
        if(ftrak->tkhd_->duration_ > fmoov->mvhd_->duration_)
        {
          fmoov->mvhd_->duration_ = ftrak->tkhd_->duration_ ;
        }
      }
      {
        /* One trex per track, as required for an mvex box. */
        trex_t* trex = trex_init();
        trex->track_id_ = trak->tkhd_->track_id_;
        trex->default_sample_description_index_ = 1;
        mvex->trexs_[mvex->tracks_] = trex;
        ++mvex->tracks_;
      }
    }
    {
      /* Serialize fmoov into the context's shared moov buffer and emit it. */
      unsigned char* moov_data = mp4_context->moov_data;
      uint32_t moov_size = moov_write(fmoov, moov_data);
      bucket_insert_tail(buckets, bucket_init_memory(moov_data, moov_size));
      filepos += moov_size;
    }
  }

  woov = woov_init(mp4_context, fmoov);
  mfra = mfra_init();
  mfra->tracks_ = fmoov->tracks_;
  {
    unsigned int i;
    unsigned int tfra_entries = 0;
    for(i = 0; i != fmoov->tracks_; ++i)
    {
      trak_t const* trak = fmoov->traks_[i];
      struct tfra_t* tfra = tfra_init();
      mfra->tfras_[i] = tfra;
      tfra->version_ = 1;
      tfra->flags_ = 0;
      tfra->track_id_ = trak->tkhd_->track_id_;
      tfra->length_size_of_traf_num_ = 1;
      tfra->length_size_of_trun_num_ = 1;
      tfra->length_size_of_sample_num_ = 1;
      // count the number of smooth sync samples (nr of moofs)
      tfra->number_of_entry_ = 0;
      {
        unsigned int start;
        for(start = 0; start != trak->samples_size_; ++start)
        {
          {
            if(trak->samples_[start].is_smooth_ss_)
            {
              ++tfra->number_of_entry_;
            }
          }
        }
      }
      tfra->table_ = (tfra_table_t*)
        malloc(tfra->number_of_entry_ * sizeof(tfra_table_t));
      tfra_entries += tfra->number_of_entry_;
      // next track
    }

    {
      /* One fragment per smooth point of the first track; moof_create fills
         in the mdat buckets and woov/mfra bookkeeping for all tracks. */
      unsigned int tfra_index = 0;
      trak_t const* base_trak = fmoov->traks_[0];
      while(tfra_index != base_trak->smoothes_size_)
      {
        // insert moof bucket
        {
          moof_t* moof = moof_init();
          bucket_t* bucket = bucket_init(BUCKET_TYPE_MEMORY);
          bucket_insert_tail(buckets, bucket);
          // create moof and write samples
          moof_create(mp4_context, fmoov, woov, moof, mfra, filepos,
                      tfra_index+1, buckets, 0 /* OUTPUT_FORMAT_MP4 */);
          // if(options->output_format == OUTPUT_FORMAT_MP4)
          {
            unsigned int samples_count = 0;
            unsigned char* moof_data = NULL;
            unsigned int moof_size = 0;
            for(i = 0;i < moof->tracks_; ++i)
              samples_count += moof->trafs_[i]->trun_->sample_count_;
            moof_data = (unsigned char*)malloc(8192 + (samples_count) * 12);
            moof_size = moof_write(moof, moof_data);
            // now that we know the size of the moof atom, we know where the mdat
            // will start. We patch the 'data_offset' field to skip the
            // moof atom and the mdat header.
            moof->trafs_[0]->trun_->data_offset_ = moof_size + ATOM_PREAMBLE_SIZE;
            moof_size = moof_write(moof, moof_data);
            bucket->buf_ = malloc(moof_size);
            bucket->size_ = moof_size;
            memcpy(bucket->buf_, moof_data, (size_t)bucket->size_);
            free(moof_data);
          }
          moof_exit(moof);
          // advance filepos for moof and mdat atom
          /* walks from this fragment's first bucket around the circular list
             back to the head, summing sizes — TODO confirm list is circular */
          while(*buckets != bucket)
          {
            filepos += bucket->size_;
            bucket = bucket->next_;
          }
        }
        // next fragment
        ++tfra_index;
      }
    }
    moov_exit(fmoov);

    {
      /* Append the trailing woov ('free'-tagged moov) and patch its chunk
         offsets to absolute positions in the final file layout. */
      uint32_t woov_size = woov_write(mp4_context, woov, buckets);
      int offset = 0;
      offset += 28;                 // ftyp
      offset += woov_size;          //woov
      offset += ATOM_PREAMBLE_SIZE; //mdat
      woov->mdat_size+=8; // header;
      if(woov->mdat_size > UINT32_MAX)
      {
        /* 64-bit (wide) mdat needs an 8-byte extended size field. */
        offset+=8;
        woov->mdat_size+=8;
      }
      for(i = 0; i != woov->moov->tracks_; ++i)
      {
        trak_t* trak = woov->moov->traks_[i];
        stco_shift_offsets_inplace(
          (unsigned char*)trak->mdia_->minf_->stbl_->stco_->stco_inplace_, (int)offset);
      }
      /* woov_data: file-scope header buffer (not declared here) — size at
         offset 8, 64-bit mdat size at offset 12; TODO confirm definition. */
      write_32(woov_data + 8, woov_size);
      write_64(woov_data + 12, woov->mdat_size);
      bucket_insert_tail(buckets, bucket_init_memory(woov_data, sizeof(woov_data)));
      woov_exit(woov);
    }

    // Write the Movie Fragment Random Access (MFRA) atom
    {
      unsigned char* mfra_data = (unsigned char*)malloc(8192 + tfra_entries * 28);
      uint32_t mfra_size = mfra_write(mfra, mfra_data);
      bucket_insert_tail(buckets, bucket_init_memory(mfra_data, mfra_size));
      mfra_exit(mfra);
      free(mfra_data);
    }
  }
  return result;
}
/* Builds one movie fragment (moof) numbered 'seq' covering, for every track,
 * the samples between smooth points seq-1 and seq. For each track it fills a
 * traf/trun, appends file buckets referencing the media bytes (merging
 * adjacent ranges), records the fragment in the mfra index, and accumulates
 * chunk/sample bookkeeping into the trailing woov.
 *
 * When output_raw is 0 an mdat header bucket is emitted first and its size
 * field patched as samples are added; when output_raw is non-zero no mdat
 * header is written and AAC samples get ADTS headers prepended instead.
 *
 * Returns 1. (The only return-0 paths live inside #if 0 regions.)
 *
 * NOTE(review): trun->data_offset_ is not set here — the caller patches it
 * after serializing the moof (see mp4_fragment_file). */
static int moof_create(struct mp4_context_t const* mp4_context, struct moov_t* fmoov, struct woov_t* woov, struct moof_t* moof, struct mfra_t* mfra, uint64_t moof_offset, unsigned int seq, struct bucket_t** buckets, int output_raw)
{
  uint32_t mdat_size = ATOM_PREAMBLE_SIZE; /* running total incl. header */
  bucket_t* mdat_bucket = 0;
  unsigned int i = 0;
  uint64_t start_time = 0, end_time = 0;
  unsigned int start=0, end=0;

  if(!output_raw)
  {
    /* Placeholder mdat header; its 32-bit size is rewritten below once the
       payload size is known. */
    unsigned char mdat_buffer[32];
    mp4_atom_t mdat_atom;
    int mdat_header_size;
    mdat_atom.type_ = FOURCC('m', 'd', 'a', 't');
    mdat_atom.short_size_ = 0;
    mdat_header_size = mp4_atom_write_header(mdat_buffer, &mdat_atom);
    mdat_bucket = bucket_init_memory(mdat_buffer, mdat_header_size);
    bucket_insert_tail(buckets, mdat_bucket);
  }

  moof->mfhd_ = mfhd_init();
  moof->mfhd_->sequence_number_ = seq;

  for(i = 0;i < fmoov->tracks_; i++)
  {
    uint32_t trun_mdat_size=0; /* payload bytes contributed by this track */
    struct trak_t * trak = fmoov->traks_[i];
    struct stsd_t const* stsd = trak->mdia_->minf_->stbl_->stsd_;
    struct sample_entry_t const* sample_entry = &stsd->sample_entries_[0];
    // int is_avc = sample_entry->fourcc_ == FOURCC('a', 'v', 'c', '1');
    struct traf_t* traf = traf_init();
    moof->trafs_[moof->tracks_] = traf;
    ++moof->tracks_;
    /* Fragment 'seq' spans [smoothes_[seq-1].start, smoothes_[seq].start). */
    start = trak->smoothes_[moof->mfhd_->sequence_number_-1].start;
    end = trak->smoothes_[moof->mfhd_->sequence_number_].start;
    {
      // struct ctts_t const* ctts = trak->mdia_->minf_->stbl_->ctts_;
      unsigned int trun_index = 0;
      unsigned int s;
      struct bucket_t* bucket_prev = 0; /* last file bucket, for range merging */
      traf->tfhd_ = tfhd_init();
      // 0x000020 = default-sample-flags present
      traf->tfhd_->flags_ = 0x000020;
      traf->tfhd_->track_id_ = trak->tkhd_->track_id_;
      // sample_degradation_priority
      traf->tfhd_->default_sample_flags_ = 0x000000;
      // sample_is_difference_sample
      if(trak->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e'))
      {
        traf->tfhd_->default_sample_flags_ |= (1 << 16);
      }
      traf->trun_ = trun_init();
      // 0x0001 = data-offset is present
      // 0x0004 = first_sample_flags is present
      // 0x0100 = sample-duration is present
      // 0x0200 = sample-size is present
      traf->trun_->flags_ = 0x000305;
      // 0x0800 = sample-composition-time-offset is present
      // if(ctts)
      {
        /* CTO flag is set unconditionally (the ctts guard is commented out). */
        traf->trun_->flags_ |= 0x000800;
      }
      traf->trun_->sample_count_ = end - start;
      // traf->trun_->data_offset_ = // set below
      traf->trun_->first_sample_flags_= 0x00000000;
      traf->trun_->table_ = (trun_table_t*)
        malloc(traf->trun_->sample_count_ * sizeof(trun_table_t));
      // traf->trun_->trak_ = trak;
      // traf->trun_->start_ = start;
      // traf->trun_->uuid0_pts_ = trak_time_to_moov_time(
      //   trak->samples_[start].pts_, 10000000, trak->mdia_->mdhd_->timescale_);

      for(s = start; s != end; ++s)
      {
        /* Duration of sample s = pts delta to the next sample (the sample
           array carries an end sentinel, so s+1 is always valid). */
        uint64_t pts1 = trak->samples_[s + 1].pts_;
        uint64_t pts0 = trak->samples_[s + 0].pts_;
        unsigned int sample_duration = (unsigned int)(pts1 - pts0);
        uint64_t sample_pos = trak->samples_[s].pos_;
        unsigned int sample_size = trak->samples_[s].size_;
        unsigned int cto = trak->samples_[s].cto_;
        traf->trun_->table_[trun_index].sample_duration_ = sample_duration;
        traf->trun_->table_[trun_index].sample_size_ = sample_size;
        traf->trun_->table_[trun_index].sample_composition_time_offset_ = cto;
        MP4_INFO(
          "frame=%u pts=%"PRIi64" cto=%u duration=%u offset=%"PRIu64" size=%u\n",
          s, trak->samples_[s].pts_, trak->samples_[s].cto_,
          sample_duration, sample_pos, sample_size);

        if(trak->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e'))
        {
#if 0
          /* Disabled: prepend SPS/PPS (Annex-B start codes) before the first
             video sample of the fragment. */
          if(bucket_prev == NULL)
          {
            // TODO: return error when no SPS and PPS are available
            if(is_avc)
            {
              unsigned char* buffer;
              unsigned char* p;
              unsigned int sps_pps_size =
                sample_entry->nal_unit_length_ + sample_entry->sps_length_ +
                sample_entry->nal_unit_length_ + sample_entry->pps_length_;
              if(sps_pps_size == 0)
              {
                MP4_ERROR("%s", "[Error] No SPS or PPS available\n");
                return 0;
              }
              buffer = (unsigned char*)malloc(sps_pps_size);
              p = buffer;
              // sps
              p = write_32(p, 0x00000001);
              memcpy(p, sample_entry->sps_, sample_entry->sps_length_);
              p += sample_entry->sps_length_;
              // pps
              p = write_32(p, 0x00000001);
              memcpy(p, sample_entry->pps_, sample_entry->pps_length_);
              p += sample_entry->pps_length_;
              bucket_insert_tail(buckets, bucket_init_memory(buffer, sps_pps_size));
              free(buffer);
              traf->trun_->table_[trun_index].sample_size_ += sps_pps_size;
              mdat_size += sps_pps_size;
              trun_mdat_size += sps_pps_size;
            }
          }
#endif
#if 0
          /* Disabled: rewrite length-prefixed NALs as Annex-B start-code
             NALs read back from the source file. */
          if(is_avc)
          {
            static const char nal_marker[4] = { 0, 0, 0, 1 };
            uint64_t first = sample_pos;
            uint64_t last = sample_pos + sample_size;
            while(first != last)
            {
              unsigned char buffer[4];
              unsigned int nal_size;
              bucket_insert_tail(buckets, bucket_init_memory(nal_marker, 4));
              if(fseeko(mp4_context->infile, first, SEEK_SET) != 0)
              {
                MP4_ERROR("%s", "Reached end of file prematurely\n");
                return 0;
              }
              if(fread(buffer, sample_entry->nal_unit_length_, 1, mp4_context->infile) != 1)
              {
                MP4_ERROR("%s", "Error reading NAL size\n");
                return 0;
              }
              nal_size = read_n(buffer, sample_entry->nal_unit_length_ * 8);
              if(nal_size == 0)
              {
                MP4_ERROR("%s", "Invalid NAL size (0)\n");
                return 0;
              }
              bucket_prev = bucket_init_file(first + sample_entry->nal_unit_length_, nal_size);
              bucket_insert_tail(buckets, bucket_prev);
              first += sample_entry->nal_unit_length_ + nal_size;
            }
          }
          else
#endif
          {
            // try to merge buckets
            if(bucket_prev && sample_pos == bucket_prev->offset_ + bucket_prev->size_)
            {
              bucket_prev->size_ += sample_size;
            }
            else
            {
              bucket_prev = bucket_init_file(sample_pos, sample_size);
              bucket_insert_tail(buckets, bucket_prev);
            }
          }
        }
        else if(trak->mdia_->hdlr_->handler_type_ == FOURCC('s', 'o', 'u', 'n'))
        {
          // ADTS frame header
          /* raw output of AAC (wFormatTag 0x00ff): prepend a 7-byte ADTS
             header so the stream is playable without the MP4 container. */
          if(sample_entry->wFormatTag == 0x00ff && output_raw)
          {
            unsigned char buffer[7];
            sample_entry_get_adts(sample_entry, sample_size, buffer);
            bucket_insert_tail(buckets, bucket_init_memory(buffer, 7));
            traf->trun_->table_[trun_index].sample_size_ += 7;
            mdat_size += 7;
            trun_mdat_size += 7;
            bucket_prev = NULL;
          }
          // try to merge buckets
          if(bucket_prev && sample_pos == bucket_prev->offset_ + bucket_prev->size_)
          {
            bucket_prev->size_ += sample_size;
          }
          else
          {
            bucket_prev = bucket_init_file(sample_pos, sample_size);
            bucket_insert_tail(buckets, bucket_prev);
          }
        }
        mdat_size += sample_size;
        trun_mdat_size += sample_size;
        ++trun_index;
        {
          // update woov track samples
          /* Append this sample (and the running end sentinel) to the woov's
             per-track sample list; samples_size_ grows by one per sample. */
          woov->moov->traks_[moof->tracks_-1]->samples_[woov->moov->traks_[moof->tracks_-1]->samples_size_++]=trak->samples_[s];
          woov->moov->traks_[moof->tracks_-1]->samples_[woov->moov->traks_[moof->tracks_-1]->samples_size_]=trak->samples_[s+1];
        }
      }

      {
        /* Record this fragment in the track's tfra for random access, and
           add one chunk (of end-start samples) to the woov's stco/stsc. */
        struct tfra_t* tfra = mfra->tfras_[moof->tracks_-1];
        tfra_table_t* table = &tfra->table_[moof->mfhd_->sequence_number_-1];
        table->time_ = trak->samples_[start].pts_;
        table->moof_offset_ = moof_offset;
        table->traf_number_ = moof->tracks_-1;
        table->trun_number_ = 0;
        table->sample_number_ = 0;
        stco_add_chunk(woov->moov->traks_[moof->tracks_-1]->mdia_->minf_->stbl_->stco_, woov->mdat_size);
        woov->mdat_size += trun_mdat_size;
        stsc_add_chunk(woov->moov->traks_[moof->tracks_-1]->mdia_->minf_->stbl_->stsc_,
          woov->moov->traks_[moof->tracks_-1]->mdia_->minf_->stbl_->stco_->entries_-1, end-start, 1);
      }

      // update size of mdat atom
      /* Patched once per track; the final iteration writes the full size. */
      if(mdat_bucket)
      {
        write_32((unsigned char*)mdat_bucket->buf_, mdat_size);
      }
    }
  }

  return 1;
}
extern int output_flv(struct mp4_context_t const* mp4_context, unsigned int* trak_sample_start, unsigned int* trak_sample_end, struct bucket_t** buckets, struct mp4_split_options_t* options) { struct moov_t* moov = mp4_context->moov; unsigned int track = 0; for(track = 0; track != moov->tracks_; ++track) { struct trak_t* trak = moov->traks_[track]; struct stsd_t const* stsd = trak->mdia_->minf_->stbl_->stsd_; struct sample_entry_t const* sample_entry = &stsd->sample_entries_[0]; unsigned int start_sample = trak_sample_start[track]; unsigned int end_sample = trak_sample_end[track]; unsigned int s; if(trak->mdia_->hdlr_->handler_type_ != FOURCC('v', 'i', 'd', 'e')) continue; if(trak->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e')) { unsigned char* buffer = (unsigned char*)malloc(1 + 1 + 3 + sample_entry->codec_private_data_length_); unsigned char* p = buffer; p = write_8(p, 0x17); p = write_8(p, RTMP_AVC_SEQUENCE_HEADER); p = write_24(p, 0); memcpy(p, sample_entry->codec_private_data_, sample_entry->codec_private_data_length_); p += sample_entry->codec_private_data_length_; bucket_insert_tail(buckets, bucket_init_memory(buffer, p - buffer)); free(buffer); } else if(trak->mdia_->hdlr_->handler_type_ == FOURCC('s', 'o', 'u', 'n')) { unsigned char* buffer = (unsigned char*)malloc(1 + 1 + sample_entry->codec_private_data_length_); unsigned char* p = buffer; p = write_8(p, 0xaf); p = write_8(p, RTMP_AAC_SEQUENCE_HEADER); memcpy(p, sample_entry->codec_private_data_, sample_entry->codec_private_data_length_); p += sample_entry->codec_private_data_length_; bucket_insert_tail(buckets, bucket_init_memory(buffer, p - buffer)); free(buffer); } else { continue; } for(s = start_sample; s != end_sample; ++s) { uint64_t sample_pos = trak->samples_[s].pos_; unsigned int sample_size = trak->samples_[s].size_; int cto = trak->samples_[s].cto_; // FLV uses a fixed 1000 timescale unsigned int composition_time = (unsigned int) (trak_time_to_moov_time(cto, 1000, 
trak->mdia_->mdhd_->timescale_)); MP4_INFO( "frame=%u pts=%u offset=%llu size=%u\n", s, composition_time, sample_pos, sample_size); if(trak->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e')) { // if(is_avc) { // VIDEODATA unsigned char header[5]; unsigned int is_keyframe = trak->samples_[s].is_ss_; unsigned int codec_id = 7; // AVC write_8(header, ((is_keyframe ? 1 : 2) << 4) + codec_id); write_8(header + 1, RTMP_AVC_NALU); write_24(header + 2, composition_time); bucket_insert_tail(buckets, bucket_init_memory(header, 5)); bucket_insert_tail(buckets, bucket_init_file(sample_pos, sample_size)); } } else { // AUDIODATA unsigned char header[2]; write_8(header, 0xaf); write_8(header + 1, RTMP_AAC_RAW); // AACAUDIODATA bucket_insert_tail(buckets, bucket_init_memory(header, 2)); bucket_insert_tail(buckets, bucket_init_file(sample_pos, sample_size)); } } } return 1; }