// traffic shaping: create offsets for each second
//
// Fills options->byte_offsets with, for each whole second of the movie, the
// largest end-of-sample file position (shifted by 'offset') reached by any
// track within that second.  The table has options->seconds entries.
static void create_traffic_shaping(moov_t* moov,
                                   unsigned int const* trak_sample_start,
                                   unsigned int const* trak_sample_end,
                                   int64_t offset,
                                   mp4_split_options_t* options)
{
  uint64_t moov_duration = moov->mvhd_->duration_;
  long moov_time_scale = moov->mvhd_->timescale_;

  unsigned int track_index;
  int second;

  // whole seconds, rounded up, plus one slack entry
  options->seconds =
    1 + (int)((moov_duration + moov_time_scale - 1) / moov_time_scale);
  options->byte_offsets = (uint64_t*)
    calloc(options->seconds, sizeof(options->byte_offsets[0]));
  if(options->byte_offsets == NULL)
  {
    // allocation failed: publish an empty table instead of writing through
    // a null pointer below
    options->seconds = 0;
    return;
  }

  for(track_index = 0; track_index != moov->tracks_; ++track_index)
  {
    struct trak_t* trak = moov->traks_[track_index];

    long trak_time_scale = trak->mdia_->mdhd_->timescale_;
    struct samples_t* samples = trak->samples_;

    unsigned int sample = trak_sample_start[track_index];
    unsigned int end_sample = trak_sample_end[track_index];
    uint64_t pts = samples[sample].pts_;

    second = 0;

    // Bound 'second' by options->seconds: a track whose duration exceeds the
    // mvhd duration would otherwise write past the end of byte_offsets.
    while(sample != end_sample && second < options->seconds)
    {
      uint64_t trak_end_offset = 0;
      // consume every sample that falls within the current one-second window
      while(sample != end_sample && samples[sample].pts_ <= pts)
      {
        trak_end_offset = samples[sample].pos_;
        trak_end_offset += samples[sample].size_;
        trak_end_offset += offset;
        ++sample;
      }

      // keep the maximum end offset over all tracks for this second
      if(options->byte_offsets[second] < trak_end_offset)
        options->byte_offsets[second] = trak_end_offset;

      pts += trak_time_scale; // next second
      ++second;
    }
  }
}
// reported by everwanna:
// av out of sync because: 
// audio track 0 without stss, seek to the exact time. 
// video track 1 with stss, seek to the nearest key frame time.
//
// fixed:
// first pass we get the new aligned times for traks with an stss present
// second pass is for traks without an stss
//
// Aligns the requested [start, end) range (in moov timescale units) to sample
// boundaries for every track, writing zero-based sample indices into
// trak_sample_start[i] / trak_sample_end[i].
//
// NOTE: 'start' and 'end' are deliberately rewritten each iteration: after a
// keyframe-bearing (stss) track is snapped in pass 0, the snapped moov time
// is what the stss-less tracks in pass 1 align against.  Do not "fix" this.
//
// Returns 0 when the aligned range is empty or inverted, 1 otherwise.
static int get_aligned_start_and_end(struct mp4_context_t const* mp4_context,
                                     unsigned int start, unsigned int end,
                                     unsigned int* trak_sample_start,
                                     unsigned int* trak_sample_end)
{
  unsigned int pass;
  struct moov_t* moov = mp4_context->moov;
  long moov_time_scale = moov->mvhd_->timescale_;

  for(pass = 0; pass != 2; ++pass)
  {
    unsigned int i;
    for(i = 0; i != moov->tracks_; ++i)
    {
      struct trak_t* trak = moov->traks_[i];
      struct stbl_t* stbl = trak->mdia_->minf_->stbl_;
      long trak_time_scale = trak->mdia_->mdhd_->timescale_;

      // 1st pass: stss present, 2nd pass: no stss present
      if(pass == 0 && !stbl->stss_)
        continue;
      if(pass == 1 && stbl->stss_)
        continue;

      // get start
      if(start == 0)
      {
        // start-of-file request needs no alignment
        trak_sample_start[i] = start;
      }
      else
      {
        // moov time -> trak time -> sample index
        start = stts_get_sample(stbl->stts_,
          moov_time_to_trak_time(start, moov_time_scale, trak_time_scale));

        MP4_INFO("start=%u (trac time)\n", start);
        MP4_INFO("start=%.2f (seconds)\n",
          stts_get_time(stbl->stts_, start) / (float)trak_time_scale);

        // snap back to the nearest keyframe (stbl API is 1-based, hence +1/-1)
        start = stbl_get_nearest_keyframe(stbl, start + 1) - 1;
        MP4_INFO("start=%u (zero based keyframe)\n", start);
        trak_sample_start[i] = start;
        // convert the snapped sample back to moov time for the next track
        start = (unsigned int)(trak_time_to_moov_time(
          stts_get_time(stbl->stts_, start), moov_time_scale, trak_time_scale));
        MP4_INFO("start=%u (moov time)\n", start);
        MP4_INFO("start=%.2f (seconds)\n", start / (float)moov_time_scale);
      }

      // get end
      if(end == 0)
      {
        // The default is till-the-end of the track
        trak_sample_end[i] = trak->samples_size_;
      }
      else
      {
        end = stts_get_sample(stbl->stts_,
          moov_time_to_trak_time(end, moov_time_scale, trak_time_scale));
        MP4_INFO("end=%u (trac time)\n", end);
        MP4_INFO("end=%.2f (seconds)\n",
          stts_get_time(stbl->stts_, end) / (float)trak_time_scale);

        if(end >= trak->samples_size_)
        {
          // clamp: requested end lies beyond the last sample
          end = trak->samples_size_;
        }
        else
        {
          end = stbl_get_nearest_keyframe(stbl, end + 1) - 1;
        }
        MP4_INFO("end=%u (zero based keyframe)\n", end);
        trak_sample_end[i] = end;
//          MP4_INFO("endframe=%u, samples_size_=%u\n", end, trak->samples_size_);
        end = (unsigned int)trak_time_to_moov_time(
          stts_get_time(stbl->stts_, end), moov_time_scale, trak_time_scale);
        MP4_INFO("end=%u (moov time)\n", end);
        MP4_INFO("end=%.2f (seconds)\n", end / (float)moov_time_scale);
      }
    }
  }

  MP4_INFO("start=%u\n", start);
  MP4_INFO("end=%u\n", end);

  if(end && start >= end)
  {
    return 0;
  }

  return 1;
}
// Writes a seek-adjusted MP4 into 'buckets': [ftyp + free][rewritten moov]
// [mdat header][mdat payload taken from the source file].  The moov is
// rewritten for the requested sample range and all chunk offsets are shifted
// to match the new file layout.
//
// Returns 1 on success (also for the fragmented-file early-out), 0 on I/O or
// allocation failure.  Ownership: all buffers handed to buckets are copied by
// bucket_init_memory, so locals are freed here.
extern int output_mp4(struct mp4_context_t* mp4_context,
                      unsigned int const* trak_sample_start,
                      unsigned int const* trak_sample_end,
                      struct bucket_t** buckets,
                      struct mp4_split_options_t* options)
{
  unsigned int i;

  uint64_t mdat_start = mp4_context->mdat_atom.start_;
  uint64_t mdat_size = mp4_context->mdat_atom.size_;
  int64_t offset;

  struct moov_t* moov = mp4_context->moov;
  // scratch buffer for the rewritten moov; slack for preamble and growth
  unsigned char* moov_data = (unsigned char*)
    malloc((size_t)mp4_context->moov_atom.size_ + ATOM_PREAMBLE_SIZE + 1024);

  uint64_t moov_size;

  long moov_time_scale = moov->mvhd_->timescale_;
  uint64_t skip_from_start = UINT64_MAX;
  uint64_t end_offset = 0;

  uint64_t moov_duration = 0;

  if(moov_data == NULL)
  {
    return 0;
  }

#if 1
  uint64_t new_mdat_start = 0;
  {
    // branded 'free' atom placed right after ftyp (size byte 42 == sizeof)
    static char const free_data[] = {
      0x0, 0x0, 0x0,  42, 'f', 'r', 'e', 'e',
      'v', 'i', 'd', 'e', 'o', ' ', 's', 'e',
      'r', 'v', 'e', 'd', ' ', 'b', 'y', ' ',
      'm', 'o', 'd', '_', 'h', '2', '6', '4',
      '_', 's', 't', 'r', 'e', 'a', 'm', 'i',
      'n', 'g'
    };
    uint32_t size_of_header = (uint32_t)mp4_context->ftyp_atom.size_ +
                              sizeof(free_data);
    unsigned char* buffer = (unsigned char*)malloc(size_of_header);

    if(buffer == NULL)
    {
      free(moov_data);
      return 0;
    }

    if(mp4_context->ftyp_atom.size_)
    {
      fseeko(mp4_context->infile, mp4_context->ftyp_atom.start_, SEEK_SET);
      if(fread(buffer, (off_t)mp4_context->ftyp_atom.size_, 1, mp4_context->infile) != 1)
      {
        MP4_ERROR("%s", "Error reading ftyp atom\n");
        free(buffer);
        free(moov_data);  // was leaked on this path
        return 0;
      }
    }

    // copy free data
    memcpy(buffer + mp4_context->ftyp_atom.size_, free_data,
           sizeof(free_data));

    if(options->output_format == OUTPUT_FORMAT_MP4)
    {
      bucket_t* bucket = bucket_init_memory(buffer, size_of_header);
      bucket_insert_tail(buckets, bucket);
    }
    free(buffer);

    new_mdat_start += size_of_header;
  }
#endif

  // delta between where mdat will start in the output and where it is in the
  // source file; the new moov size is added once it is known
  offset = new_mdat_start - mp4_context->mdat_atom.start_;

  for(i = 0; i != moov->tracks_; ++i)
  {
    struct trak_t* trak = moov->traks_[i];
    struct stbl_t* stbl = trak->mdia_->minf_->stbl_;

    unsigned int start_sample = trak_sample_start[i];
    unsigned int end_sample = trak_sample_end[i];

    if (options->exact)
      trak_fast_forward_first_partial_GOP(mp4_context, options, trak, start_sample);

    trak_update_index(mp4_context, trak, start_sample, end_sample);

    if(trak->samples_size_ == 0)
    {
      MP4_WARNING("Trak %u contains no samples. Maybe a fragmented file?", i);
      free(moov_data);  // was leaked on this path
      return 1;
    }

    {
      // how many mdat bytes before the first wanted sample can be dropped;
      // the whole-file skip is the minimum over all tracks
      uint64_t skip =
        trak->samples_[start_sample].pos_ - trak->samples_[0].pos_;
      if(skip < skip_from_start)
        skip_from_start = skip;
      MP4_INFO("Trak can skip %"PRIu64" bytes\n", skip);

      if(end_sample != trak->samples_size_)
      {
        // the whole-file end offset is the maximum over all tracks
        uint64_t end_pos = trak->samples_[end_sample].pos_;
        if(end_pos > end_offset)
          end_offset = end_pos;
        MP4_INFO("New endpos=%"PRIu64"\n", end_pos);
        MP4_INFO("Trak can skip %"PRIu64" bytes at end\n",
               mdat_start + mdat_size - end_offset);
      }
    }

    {
      // fixup trak (duration) from the rewritten stts
      uint64_t trak_duration = stts_get_duration(stbl->stts_);
      long trak_time_scale = trak->mdia_->mdhd_->timescale_;
      {
        uint64_t duration = trak_time_to_moov_time(trak_duration,
          moov_time_scale, trak_time_scale);
        trak->mdia_->mdhd_->duration_= trak_duration;
        trak->tkhd_->duration_ = duration;
        MP4_INFO("trak: new_duration=%"PRIu64"\n", duration);

        if(duration > moov_duration)
          moov_duration = duration;
      }
    }
  }
  moov->mvhd_->duration_ = moov_duration;
  MP4_INFO("moov: new_duration=%.2f seconds\n", moov_duration / (float)moov_time_scale);

  // subtract bytes we skip at the front of the mdat atom
  // NOTE(review): if moov->tracks_ were 0, skip_from_start would still be
  // UINT64_MAX here -- assumed impossible for a valid moov.
  offset -= skip_from_start;

  MP4_INFO("%s", "moov: writing header\n");

  moov_write(moov, moov_data);
  moov_size = read_32(moov_data);

  // add new moov size
  offset += moov_size;

  MP4_INFO("shifting offsets by %"PRId64"\n", offset);
  moov_shift_offsets_inplace(moov, offset);

  // traffic shaping: create offsets for each second
  create_traffic_shaping(moov,
                         trak_sample_start,
                         trak_sample_end,
                         offset,
                         options);

#ifdef COMPRESS_MOOV_ATOM
  if(!options->client_is_flash)
  {
    compress_moov(mp4_context, moov, moov_data, &moov_size);
  }
#endif

  if(end_offset != 0)
  {
    MP4_INFO("mdat_size=%"PRId64" end_offset=%"PRId64"\n",
             mdat_size, end_offset);
    mdat_size = end_offset - mdat_start;
  }
  mdat_start += skip_from_start;
  mdat_size -= skip_from_start;

  MP4_INFO("mdat_bucket(%"PRId64", %"PRId64")\n", mdat_start, mdat_size);

  bucket_insert_tail(buckets, bucket_init_memory(moov_data, moov_size));
  free(moov_data);

  {
    struct mp4_atom_t mdat_atom;
    mdat_atom.type_ = FOURCC('m', 'd', 'a', 't');
    mdat_atom.short_size_ = 0; // TODO: use original small/wide mdat box

    if(options->adaptive)
    {
      // empty mdat atom
      mdat_atom.size_ = ATOM_PREAMBLE_SIZE;
    }
    else
    {
      mdat_atom.size_ = mdat_size;
    }

    {
      unsigned char buffer[32];
      int mdat_header_size = mp4_atom_write_header(buffer, &mdat_atom);
      bucket_insert_tail(buckets,
        bucket_init_memory(buffer, mdat_header_size));

      if(mdat_atom.size_ - mdat_header_size)
      {
        // payload is served straight from the source file
        bucket_insert_tail(buckets,
          bucket_init_file(mdat_start + mdat_header_size,
                           mdat_atom.size_ - mdat_header_size));
      }
    }
  }

  return 1;
}
// Fast-forwards the partial GOP that precedes the exact requested start time.
//
// Seeking snaps to the keyframe at/before options->start; the frames between
// that keyframe and the exact start would otherwise be shown.  This rewrites
// their PTS values to sit fractionally just before the PTS of the first fully
// wanted frame, so a player effectively skips over them.  Mutates
// trak->samples_[..].pts_ in place; the caller rewrites the STTS afterwards.
static void trak_fast_forward_first_partial_GOP(struct mp4_context_t const* mp4_context, 
                                                struct mp4_split_options_t* options, 
                                                struct trak_t *trak, 
                                                unsigned int start_sample)
{
  if (!trak->mdia_->minf_->stbl_->stts_){
    MP4_WARNING("%s", "FFGOP: NO STTS FOR THIS TRACK -- CANNOT ADJUST THIS TRACK\n");
    return;
  }
  // NOTE: STTS atom = "time to sample" atom, which is what we use
  //  (and STSS atom = "sync samples" atom, which is list of keyframes)
  struct stts_t* stts = trak->mdia_->minf_->stbl_->stts_;

  // find the sample frame location of the exact desired time we wanted to 
  // start at (regardless of keyframes!)
  // NOTE(review): timescales kept as float to preserve the original
  // options->start * moov_time_scale arithmetic exactly.
  struct moov_t* moov = mp4_context->moov;
  float moov_time_scale = moov->mvhd_->timescale_;
  float trak_time_scale = trak->mdia_->mdhd_->timescale_;
  unsigned int start_exact_time_sample = stts_get_sample(stts, moov_time_to_trak_time((options->start * moov_time_scale), moov_time_scale, trak_time_scale));

  if (start_exact_time_sample == start_sample)
    return; // starting at wanted time already, nothing to do!

  MP4_INFO("FFGOP: start: %fs;  sample start exact time:%u;  sample keyframe just before:%u\n", 
           options->start, start_exact_time_sample, start_sample);
  MP4_INFO("FFGOP: moov_time_scale = %f, trak_time_scale = %f\n", moov_time_scale, trak_time_scale);

  // In practice, IA videos seem to always have stts->entries_ == 1 8-)
  // That's the starting number / table setup.
  // The STTS atom will be rewritten by caller, expanding to more entries since we dropping durations!
  unsigned int s=0, i=0, j=0, nRewritten=0;
  for (j=0; j < stts->entries_; j++){ 
    for (i=0; i < stts->table_[j].sample_count_; i++){
      // NOTE: begin time-shifting at "start_sample" bec. mod_h264_streaming 
      // finds the keyframe (sample time) before the exact start time, and *then*
      // decrements by one.  so those samples "go out the door" -- and thus we
      // need to rewrite them, too
      if (s >= start_sample  &&  s < start_exact_time_sample){
        /* see mp4_io.h for samples_t (pts_/size_/pos_/cto_/is_ss_/is_smooth_ss_) */
        samples_t sample = trak->samples_[s];
        // let's change current PTS to something fractionally *just* less than
        // the PTS of the first frame we want to see fully.  each frame we dont want
        // to see is 1 fraction earlier PTS than the next frame PTS.
        uint64_t pts  = sample.pts_;
        uint64_t pts2 = trak->samples_[start_exact_time_sample].pts_ - (start_exact_time_sample-s);
        trak->samples_[s].pts_ = pts2;
        // was %lu for uint64_t (undefined behavior where long is 32-bit);
        // use the portable PRIu64 macros instead
        MP4_INFO("FFGOP: stts[%u] samples_[%u].pts_ = %"PRIu64" (%0.3fsec)  REWRITING TO %"PRIu64" (%0.3fsec)\n", 
                 j, s, pts, ((float)pts / trak_time_scale), pts2, ((float)pts2 / trak_time_scale));
        nRewritten++;
      }
      s++;
    }
  }

  if (nRewritten){
    MP4_WARNING("FFGOP: ==============>  %u FRAMES GOT FAST-FORWARDED (APPROXIMATELY %2.1f SECONDS ASSUMING 29.97 fps, YMMV)\n\n", nRewritten, nRewritten/29.97);
  }
}
// Compresses the serialized moov atom ('moov_data', *moov_size bytes) with
// zlib, rewriting it in place as moov[cmov[dcom+cmvd]]+free when that saves
// more than 'extra_space' bytes.  Updates *moov_size accordingly; on any
// failure (allocation, compression, no saving) the moov is left unchanged.
static void compress_moov(struct mp4_context_t* mp4_context,
                          struct moov_t* moov,
                          unsigned char* moov_data,
                          uint64_t* moov_size)
{
  uLong sourceLen = (uLong)(*moov_size - ATOM_PREAMBLE_SIZE);
  uLong destLen = compressBound(sourceLen);
  unsigned char* cmov = (unsigned char*)malloc(destLen);
  int zstatus;

  if(cmov == NULL)
  {
    return;  // out of memory: serve the uncompressed moov
  }

  zstatus = compress(cmov, &destLen, moov_data, sourceLen);
  if(zstatus != Z_OK)
  {
    // compression failed: keep the uncompressed moov
    free(cmov);
    return;
  }
  // %lu matches uLong (unsigned long); was %ld
  MP4_INFO("cmov size = %lu (%lu%%)\n", destLen, 100 * destLen / sourceLen);

  {
    // padding so a second pass with shifted offsets still fits
    const int extra_space = 4096;
    if(destLen + extra_space < sourceLen)
    {
      const int bytes_saved = sourceLen - destLen;
      uLong destLen2;
      int extra = 0;

      // first shift: chunk offsets move back by the bytes the cmov saves
      MP4_INFO("shifting offsets by %d\n", -bytes_saved);
      moov_shift_offsets_inplace(moov, -bytes_saved);

      // then forward by the wrapper atoms we are about to add
      extra += ATOM_PREAMBLE_SIZE + 4;            // dcom
      extra += ATOM_PREAMBLE_SIZE + 4;            // cmvd
      extra += ATOM_PREAMBLE_SIZE;                // cmov
      extra += ATOM_PREAMBLE_SIZE + extra_space;  // free

      MP4_INFO("shifting offsets by %d\n", extra);
      moov_shift_offsets_inplace(moov, extra);

      // recompress with the adjusted offsets
      destLen2 = compressBound(sourceLen);
      zstatus = compress(cmov, &destLen2, moov_data, sourceLen);
      if(zstatus == Z_OK)
      {
        MP4_INFO("cmov size = %lu (%lu%%)\n", destLen2, 100 * destLen2 / sourceLen);

        if(destLen2 < destLen + extra_space)
        {
          // copy compressed movie atom
          unsigned char* outbuffer = moov_data;

          uint32_t dcom_size = ATOM_PREAMBLE_SIZE + 4;
          uint32_t cmvd_size = ATOM_PREAMBLE_SIZE + 4 + destLen2;
          uint32_t cmov_size = ATOM_PREAMBLE_SIZE + dcom_size + cmvd_size;
          // free atom absorbs the pass-2 size difference so the total stays fixed
          uint32_t free_size = ATOM_PREAMBLE_SIZE + extra_space + destLen - destLen2;
          *moov_size = ATOM_PREAMBLE_SIZE + cmov_size + free_size;

          outbuffer = write_32(outbuffer, (uint32_t)*moov_size);

          // skip 'moov'
          outbuffer += 4;

          outbuffer = write_32(outbuffer, cmov_size);
          {
            outbuffer = write_32(outbuffer, FOURCC('c', 'm', 'o', 'v'));
            outbuffer = write_32(outbuffer, dcom_size);
            outbuffer = write_32(outbuffer, FOURCC('d', 'c', 'o', 'm'));
            outbuffer = write_32(outbuffer, FOURCC('z', 'l', 'i', 'b'));

            outbuffer = write_32(outbuffer, cmvd_size);
            {
              // cmvd payload: uncompressed size followed by the zlib stream
              outbuffer = write_32(outbuffer, FOURCC('c', 'm', 'v', 'd'));
              outbuffer = write_32(outbuffer, sourceLen);
              memcpy(outbuffer, cmov, destLen2);
              outbuffer += destLen2;
            }
          }

          // add final padding
          outbuffer = write_32(outbuffer, free_size);
          outbuffer = write_32(outbuffer, FOURCC('f', 'r', 'e', 'e'));
          {
            const char free_bytes[8] =
            {
              'C', 'o', 'd', 'e','S','h', 'o', 'p'
            };
            uint32_t padding_index;
            for(padding_index = ATOM_PREAMBLE_SIZE; padding_index != free_size; ++padding_index)
            {
              outbuffer[padding_index] = free_bytes[padding_index % 8];
            }
          }
        }
        else
        {
          MP4_ERROR("%s", "2nd pass compress overflow\n");
        }
      }
    }
  }
  free(cmov);
}
/* ---- Example #6 (scraped sample separator; not part of the code) ---- */
// Builds one movie fragment (moof #seq) for smooth streaming.
//
// For every track in 'fmoov' it emits a traf/tfhd/trun describing the samples
// of fragment 'seq' (sample range taken from trak->smoothes_), appends the
// media data to 'buckets' (merging adjacent file ranges), and mirrors the
// samples and chunk bookkeeping into the writable 'woov' plus a random-access
// entry in 'mfra'.  When !output_raw an mdat header bucket is emitted first
// and its size patched once the fragment size is known.
// Returns 1 on success (0 only from currently-#if 0'd AVC paths).
static int moof_create(struct mp4_context_t const* mp4_context,
struct moov_t* fmoov,
struct woov_t* woov,
struct moof_t* moof,
struct mfra_t* mfra,
	uint64_t moof_offset,
	unsigned int seq,
struct bucket_t** buckets,
	int output_raw)
{
	uint32_t mdat_size = ATOM_PREAMBLE_SIZE;
	bucket_t* mdat_bucket = 0;
	unsigned int i = 0;
	uint64_t start_time = 0, end_time = 0;
	unsigned int start=0, end=0;
	if(!output_raw)
	{
		// placeholder mdat header; bucket_init_memory copies the stack buffer
		// and write_32 patches the size through mdat_bucket->buf_ below
		unsigned char mdat_buffer[32];
		mp4_atom_t mdat_atom;
		int mdat_header_size;
		mdat_atom.type_ = FOURCC('m', 'd', 'a', 't');
		mdat_atom.short_size_ = 0;
		mdat_header_size = mp4_atom_write_header(mdat_buffer, &mdat_atom);
		mdat_bucket = bucket_init_memory(mdat_buffer, mdat_header_size);
		bucket_insert_tail(buckets, mdat_bucket);
	}

	moof->mfhd_ = mfhd_init();
	moof->mfhd_->sequence_number_ = seq;

	for(i = 0;i < fmoov->tracks_; i++)
	{
		uint32_t trun_mdat_size=0;
		struct trak_t * trak = fmoov->traks_[i];
		struct stsd_t const* stsd = trak->mdia_->minf_->stbl_->stsd_;
		struct sample_entry_t const* sample_entry = &stsd->sample_entries_[0];
		//    int is_avc = sample_entry->fourcc_ == FOURCC('a', 'v', 'c', '1');

		struct traf_t* traf = traf_init();
		moof->trafs_[moof->tracks_] = traf;
		++moof->tracks_;

		// fragment's sample range [start, end) from the smooth-streaming
		// split table; assumes seq >= 1 and seq is a valid smoothes_ index
		start = trak->smoothes_[moof->mfhd_->sequence_number_-1].start;
		end = trak->smoothes_[moof->mfhd_->sequence_number_].start;


		{
			//      struct ctts_t const* ctts = trak->mdia_->minf_->stbl_->ctts_;
			unsigned int trun_index = 0;
			unsigned int s;
			struct bucket_t* bucket_prev = 0;

			traf->tfhd_ = tfhd_init();
			// 0x000020 = default-sample-flags present
			traf->tfhd_->flags_ = 0x000020;
			traf->tfhd_->track_id_ = trak->tkhd_->track_id_;
			// sample_degradation_priority
			traf->tfhd_->default_sample_flags_ = 0x000000;
			// sample_is_difference_sample
			if(trak->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e'))
			{
				traf->tfhd_->default_sample_flags_ |= (1 << 16);
			}

			traf->trun_ = trun_init();
			// 0x0001 = data-offset is present
			// 0x0004 = first_sample_flags is present
			// 0x0100 = sample-duration is present
			// 0x0200 = sample-size is present
			traf->trun_->flags_ = 0x000305;
			// 0x0800 = sample-composition-time-offset is present
			//      if(ctts)
			{
				traf->trun_->flags_ |= 0x000800;
			}

			traf->trun_->sample_count_ = end - start;
			//    traf->trun_->data_offset_ = // set below
			traf->trun_->first_sample_flags_= 0x00000000;
			// NOTE(review): malloc result not checked here -- TODO confirm
			// project policy (other call sites in this file also don't check)
			traf->trun_->table_ = (trun_table_t*)malloc(traf->trun_->sample_count_ * sizeof(trun_table_t));

			//      traf->trun_->trak_ = trak;
			//      traf->trun_->start_ = start;
			//    traf->trun_->uuid0_pts_ = trak_time_to_moov_time(
			//        trak->samples_[start].pts_, 10000000, trak->mdia_->mdhd_->timescale_);

			for(s = start; s != end; ++s)
			{
				// duration from the pts delta to the next sample; requires
				// samples_[end] to be a valid (sentinel) entry
				uint64_t pts1 = trak->samples_[s + 1].pts_;
				uint64_t pts0 = trak->samples_[s + 0].pts_;

				unsigned int sample_duration = (unsigned int)(pts1 - pts0);

				uint64_t sample_pos = trak->samples_[s].pos_;
				unsigned int sample_size = trak->samples_[s].size_;
				unsigned int cto = trak->samples_[s].cto_;

				traf->trun_->table_[trun_index].sample_duration_ = sample_duration;
				traf->trun_->table_[trun_index].sample_size_ = sample_size;
				traf->trun_->table_[trun_index].sample_composition_time_offset_ = cto;

				MP4_INFO(
					"frame=%u pts=%"PRIi64" cto=%u duration=%u offset=%"PRIu64" size=%u\n",
					s,
					trak->samples_[s].pts_,
					trak->samples_[s].cto_,
					sample_duration,
					sample_pos, sample_size);

				if(trak->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e'))
				{
#if 0
					if(bucket_prev == NULL)
					{
						// TODO: return error when no SPS and PPS are available
						if(is_avc)
						{
							unsigned char* buffer;
							unsigned char* p;
							unsigned int sps_pps_size =
								sample_entry->nal_unit_length_ + sample_entry->sps_length_ +
								sample_entry->nal_unit_length_ + sample_entry->pps_length_;

							if(sps_pps_size == 0)
							{
								MP4_ERROR("%s", "[Error] No SPS or PPS available\n");
								return 0;
							}

							buffer = (unsigned char*)malloc(sps_pps_size);
							p = buffer;

							// sps
							p = write_32(p, 0x00000001);
							memcpy(p, sample_entry->sps_, sample_entry->sps_length_);
							p += sample_entry->sps_length_;

							// pps
							p = write_32(p, 0x00000001);
							memcpy(p, sample_entry->pps_, sample_entry->pps_length_);
							p += sample_entry->pps_length_;

							bucket_insert_tail(buckets, bucket_init_memory(buffer, sps_pps_size));
							free(buffer);

							traf->trun_->table_[trun_index].sample_size_ += sps_pps_size;
							mdat_size += sps_pps_size;
							trun_mdat_size += sps_pps_size;
						}
					}
#endif

#if 0
					if(is_avc)
					{
						static const char nal_marker[4] = { 0, 0, 0, 1 };
						uint64_t first = sample_pos;
						uint64_t last = sample_pos + sample_size;
						while(first != last)
						{
							unsigned char buffer[4];
							unsigned int nal_size;
							bucket_insert_tail(buckets, bucket_init_memory(nal_marker, 4));

							if(fseeko(mp4_context->infile, first, SEEK_SET) != 0)
							{
								MP4_ERROR("%s", "Reached end of file prematurely\n");
								return 0;
							}
							if(fread(buffer, sample_entry->nal_unit_length_, 1, mp4_context->infile) != 1)
							{
								MP4_ERROR("%s", "Error reading NAL size\n");
								return 0;
							}
							nal_size = read_n(buffer, sample_entry->nal_unit_length_ * 8);

							if(nal_size == 0)
							{
								MP4_ERROR("%s", "Invalid NAL size (0)\n");
								return 0;
							}

							bucket_prev = bucket_init_file(first + sample_entry->nal_unit_length_, nal_size);
							bucket_insert_tail(buckets, bucket_prev);

							first += sample_entry->nal_unit_length_ + nal_size;
						}
					}
					else
#endif
					{
						// try to merge buckets
						if(bucket_prev &&
							sample_pos == bucket_prev->offset_ + bucket_prev->size_)
						{
							bucket_prev->size_ += sample_size;
						}
						else
						{
							bucket_prev = bucket_init_file(sample_pos, sample_size);
							bucket_insert_tail(buckets, bucket_prev);
						}
					}
				} else
					if(trak->mdia_->hdlr_->handler_type_ == FOURCC('s', 'o', 'u', 'n'))
					{
						// ADTS frame header
						if(sample_entry->wFormatTag == 0x00ff && output_raw)
						{
							unsigned char buffer[7];
							sample_entry_get_adts(sample_entry, sample_size, buffer);

							bucket_insert_tail(buckets, bucket_init_memory(buffer, 7));

							traf->trun_->table_[trun_index].sample_size_ += 7;
							mdat_size += 7;
							trun_mdat_size += 7;

							bucket_prev = NULL;
						}

						// try to merge buckets
						if(bucket_prev &&
							sample_pos == bucket_prev->offset_ + bucket_prev->size_)
						{
							bucket_prev->size_ += sample_size;
						}
						else
						{
							bucket_prev = bucket_init_file(sample_pos, sample_size);
							bucket_insert_tail(buckets, bucket_prev);
						}
					}

					// NOTE: despite the indentation, everything below runs for
					// EVERY sample (the vide/soun if-else closed above)
					mdat_size += sample_size;
					trun_mdat_size += sample_size;

					++trun_index;

					{
						// update woov track samples
						// (s+1 write keeps the sentinel end-sample current;
						// overwritten by the next iteration's s write)
						woov->moov->traks_[moof->tracks_-1]->samples_[woov->moov->traks_[moof->tracks_-1]->samples_size_++]=trak->samples_[s];
						woov->moov->traks_[moof->tracks_-1]->samples_[woov->moov->traks_[moof->tracks_-1]->samples_size_]=trak->samples_[s+1];
					}
			}
			{
				// random-access (mfra/tfra) entry for this fragment, plus
				// chunk bookkeeping (stco/stsc) in the writable moov
				struct tfra_t* tfra = mfra->tfras_[moof->tracks_-1];
				tfra_table_t* table = &tfra->table_[moof->mfhd_->sequence_number_-1];
				table->time_ = trak->samples_[start].pts_;
				table->moof_offset_ = moof_offset;
				table->traf_number_ = moof->tracks_-1;
				table->trun_number_ = 0;
				table->sample_number_ = 0;
				stco_add_chunk(woov->moov->traks_[moof->tracks_-1]->mdia_->minf_->stbl_->stco_, woov->mdat_size);
				woov->mdat_size += trun_mdat_size;
				stsc_add_chunk(woov->moov->traks_[moof->tracks_-1]->mdia_->minf_->stbl_->stsc_, 
					woov->moov->traks_[moof->tracks_-1]->mdia_->minf_->stbl_->stco_->entries_-1, end-start, 1); 

			}
			// update size of mdat atom
			if(mdat_bucket)
			{
				write_32((unsigned char*)mdat_bucket->buf_, mdat_size);
			}
		}
	}

	return 1;
}
/* ---- Example #7 (scraped sample separator; not part of the code) ---- */
// Streams the selected sample range as FLV tag payloads into 'buckets':
// first the AVC/AAC sequence header built from the codec private data, then
// one VIDEODATA/AUDIODATA header bucket plus a file bucket per sample.
//
// NOTE(review): the early 'continue' below skips every non-video track, so
// the 'soun' sequence-header branch and the AUDIODATA path are currently
// dead code -- presumably a deliberate video-only filter; confirm before
// removing either the filter or the audio branches.
extern int output_flv(struct mp4_context_t const* mp4_context,
                      unsigned int* trak_sample_start,
                      unsigned int* trak_sample_end,
                      struct bucket_t** buckets,
                      struct mp4_split_options_t* options)
{
  struct moov_t* moov = mp4_context->moov;
  unsigned int track = 0;

  for(track = 0; track != moov->tracks_; ++track)
  {
    struct trak_t* trak = moov->traks_[track];
    struct stsd_t const* stsd = trak->mdia_->minf_->stbl_->stsd_;
    struct sample_entry_t const* sample_entry = &stsd->sample_entries_[0];
    unsigned int start_sample = trak_sample_start[track];
    unsigned int end_sample = trak_sample_end[track];
    unsigned int s;

    if(trak->mdia_->hdlr_->handler_type_ != FOURCC('v', 'i', 'd', 'e'))
      continue;

    if(trak->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e'))
    {
      // AVC sequence header: tag byte, packet type, zero cto, then the
      // AVCDecoderConfigurationRecord from the sample entry
      unsigned char* buffer = (unsigned char*)malloc(1 + 1 + 3 + sample_entry->codec_private_data_length_);
      unsigned char* p = buffer;

      p = write_8(p, 0x17);
      p = write_8(p, RTMP_AVC_SEQUENCE_HEADER);
      p = write_24(p, 0);
      memcpy(p, sample_entry->codec_private_data_,
             sample_entry->codec_private_data_length_);
      p += sample_entry->codec_private_data_length_;
      bucket_insert_tail(buckets, bucket_init_memory(buffer, p - buffer));
      free(buffer);
    } else
    if(trak->mdia_->hdlr_->handler_type_ == FOURCC('s', 'o', 'u', 'n'))
    {
      // AAC sequence header (unreachable while the video-only filter above
      // is in place)
      unsigned char* buffer = (unsigned char*)malloc(1 + 1 + sample_entry->codec_private_data_length_);
      unsigned char* p = buffer;

      p = write_8(p, 0xaf);
      p = write_8(p, RTMP_AAC_SEQUENCE_HEADER);

      memcpy(p, sample_entry->codec_private_data_,
             sample_entry->codec_private_data_length_);
      p += sample_entry->codec_private_data_length_;
      bucket_insert_tail(buckets, bucket_init_memory(buffer, p - buffer));
      free(buffer);
    } else
    {
      continue;
    }

    for(s = start_sample; s != end_sample; ++s)
    {
      uint64_t sample_pos = trak->samples_[s].pos_;
      unsigned int sample_size = trak->samples_[s].size_;
      int cto = trak->samples_[s].cto_;

      // FLV uses a fixed 1000 timescale
      unsigned int composition_time = (unsigned int)
        (trak_time_to_moov_time(cto, 1000, trak->mdia_->mdhd_->timescale_));

      // was %llu for uint64_t, which is non-portable; use PRIu64
      MP4_INFO(
        "frame=%u pts=%u offset=%"PRIu64" size=%u\n",
        s, composition_time, sample_pos, sample_size);

      if(trak->mdia_->hdlr_->handler_type_ == FOURCC('v', 'i', 'd', 'e'))
      {
        {
          // VIDEODATA
          unsigned char header[5];
          unsigned int is_keyframe = trak->samples_[s].is_ss_;
          unsigned int codec_id = 7;          // AVC
          write_8(header, ((is_keyframe ? 1 : 2) << 4) + codec_id);

          write_8(header + 1, RTMP_AVC_NALU);
          write_24(header + 2, composition_time);
          bucket_insert_tail(buckets, bucket_init_memory(header, 5));
          bucket_insert_tail(buckets, bucket_init_file(sample_pos, sample_size));
        }
      }
      else
      {
        // AUDIODATA (unreachable while the video-only filter is in place)
        unsigned char header[2];
        write_8(header, 0xaf);
        write_8(header + 1, RTMP_AAC_RAW);
        // AACAUDIODATA
        bucket_insert_tail(buckets, bucket_init_memory(header, 2));
        bucket_insert_tail(buckets, bucket_init_file(sample_pos, sample_size));
      }
    }
  }

  return 1;
}
/* ---- Example #8 (scraped sample separator; not part of the code) ---- */
// reported by everwanna:
// av out of sync because: 
// audio track 0 without stss, seek to the exact time. 
// video track 1 with stss, seek to the nearest key frame time.
//
// fixed:
// first pass we get the new aligned times for traks with an stss present
// second pass is for traks without an stss
//
// Variant that backtracks per-sample via the is_ss_ flag instead of the stss
// atom.  'start'/'end' (moov timescale) are rewritten on every iteration so
// later tracks align against the keyframe-snapped times of earlier ones.
// Results go to trak_sample_start[i] / trak_sample_end[i] (zero-based).
// Returns 0 when the aligned range is empty or inverted, 1 otherwise.
static int get_aligned_start_and_end(mp4_context_t const* mp4_context,
                                     int64_t start, int64_t end,
                                     unsigned int* trak_sample_start,
                                     unsigned int* trak_sample_end)
{
  unsigned int pass;
  moov_t const* moov = mp4_context->moov;
  long moov_time_scale = moov->mvhd_->timescale_;

  for(pass = 0; pass != 2; ++pass)
  {
    unsigned int i;
    for(i = 0; i != moov->tracks_; ++i)
    {
      trak_t const* trak = moov->traks_[i];
      long trak_time_scale = trak->mdia_->mdhd_->timescale_;

      // empty tracks contribute an empty range and are otherwise ignored
      if(trak->samples_size_ == 0)
      {
        trak_sample_start[i] = 0;
        trak_sample_end[i] = 0;
        continue;
      }

      // get start
      {
        unsigned int sample_index = time_to_sample(trak,
            moov_time_to_trak_time(start, moov_time_scale, trak_time_scale));

        // backtrack to nearest keyframe
        if(!trak->samples_[sample_index].is_ss_)
        {
          while(sample_index && !trak->samples_[sample_index].is_ss_)
          {
            --sample_index;
          }
          // snap 'start' so subsequent tracks align to this keyframe
          start = trak_time_to_moov_time(trak->samples_[sample_index].pts_,
                                         moov_time_scale, trak_time_scale);
        }
        trak_sample_start[i] = sample_index;
//        MP4_INFO("ts=%"PRId64" (moov time)\n", start);
        MP4_INFO("ts=%.2f (seconds)\n",
                 trak->samples_[sample_index].pts_ / (float)trak_time_scale);
      }

      // get end
      {
        unsigned int sample_index;
        if(end == 0)
        {
          // The default is till-the-end of the track
          sample_index = trak->samples_size_;
        }
        else
        {
          sample_index = time_to_sample(trak,
              moov_time_to_trak_time(end, moov_time_scale, trak_time_scale));
        }

        // backtrack to nearest keyframe
        // NOTE(review): when sample_index == samples_size_ this reads
        // samples_[samples_size_] -- assumed to be a valid sentinel entry
        // (as used elsewhere in this file); confirm is_ss_ is initialized
        // for it.
        if(!trak->samples_[sample_index].is_ss_)
        {
          while(sample_index && !trak->samples_[sample_index].is_ss_)
          {
            --sample_index;
          }
          end = trak_time_to_moov_time(trak->samples_[sample_index].pts_,
                                       moov_time_scale, trak_time_scale);
        }
        trak_sample_end[i] = sample_index;
//        MP4_INFO("te=%"PRId64" (moov time)\n", end);
        MP4_INFO("te=%.2f (seconds)\n",
                 trak->samples_[sample_index].pts_ / (float)trak_time_scale);
      }
    }
  }

  MP4_INFO("final start=%"PRId64"\n", start);
  MP4_INFO("final end=%"PRId64"\n", end);

  if(end && start >= end)
  {
    return 0;
  }

  return 1;
}