Example #1
nsresult
TrackExtendsBox::Generate(uint32_t* aBoxSize)
{
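  // Ask the muxer control which track ID was assigned to this track.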
  track_ID = mControl->GetTrackID(mTrackType);

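  // Fill in the per-track default sample fields ('trex', ISO/IEC 14496-12 8.8.3).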
  if (mTrackType == Audio_Track) {
    default_sample_description_index = 1;
    default_sample_duration = mMeta.mAudMeta->FrameDuration;
    default_sample_size = mMeta.mAudMeta->FrameSize;
    default_sample_flags = set_sample_flags(1);
  } else if (mTrackType == Video_Track) {
    default_sample_description_index = 1;
    default_sample_duration =
      mMeta.mVidMeta->VideoFrequency / mMeta.mVidMeta->FrameRate;
    default_sample_size = 0;
    default_sample_flags = set_sample_flags(0);
  } else {
    MOZ_ASSERT(0);
    return NS_ERROR_FAILURE;
  }

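  // Accumulate the size of the default sample fields into the total box size.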
  size += sizeof(track_ID) +
          sizeof(default_sample_description_index) +
          sizeof(default_sample_duration) +
          sizeof(default_sample_size) +
          sizeof(default_sample_flags);

  *aBoxSize = size;

  return NS_OK;
}
Example #2
uint32_t
TrackRunBox::fillSampleTable()
{
  uint32_t table_size = 0;
  nsresult rv;
  nsTArray<nsRefPtr<EncodedFrame>> frames;
  FragmentBuffer* frag = mControl->GetFragment(mTrackType);

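  // Fetch the frames queued for the first fragment; report an empty table on failure.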
  rv = frag->GetFirstFragment(frames);
  if (NS_FAILED(rv)) {
    return 0;
  }
  uint32_t len = frames.Length();
  sample_info_table = new tbl[len];
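  // One table entry per frame; the sample size is always written, the sample
  // flags only when flags_sample_flags_present is set.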
  for (uint32_t i = 0; i < len; i++) {
    sample_info_table[i].sample_duration = 0;
    sample_info_table[i].sample_size = frames.ElementAt(i)->GetFrameData().Length();
    mAllSampleSize += sample_info_table[i].sample_size;
    table_size += sizeof(uint32_t);
    if (flags.to_ulong() & flags_sample_flags_present) {
      sample_info_table[i].sample_flags =
        set_sample_flags(
          (frames.ElementAt(i)->GetFrameType() == EncodedFrame::I_FRAME));
      table_size += sizeof(uint32_t);
    } else {
      sample_info_table[i].sample_flags = 0;
    }
    sample_info_table[i].sample_composition_time_offset = 0;
  }
  return table_size;
}
Example #3
nsresult
TrackExtendsBox::Generate(uint32_t* aBoxSize)
{
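  // The control assigns track IDs per metadata kind; pick the one matching this track.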
  track_ID = (mTrackType == Audio_Track ?
                mControl->GetTrackID(mAudioMeta->GetKind()) :
                mControl->GetTrackID(mVideoMeta->GetKind()));

  if (mTrackType == Audio_Track) {
    default_sample_description_index = 1;
    default_sample_duration = mAudioMeta->GetAudioFrameDuration();
    default_sample_size = mAudioMeta->GetAudioFrameSize();
    default_sample_flags = set_sample_flags(1);
  } else if (mTrackType == Video_Track) {
    default_sample_description_index = 1;
    // If the video metadata has an assigned frame rate, the video's frame
    // rate is fixed and a default sample duration can be derived from it.
    if (mVideoMeta->GetVideoFrameRate()) {
      default_sample_duration =
        mVideoMeta->GetVideoClockRate() / mVideoMeta->GetVideoFrameRate();
    }
    default_sample_size = 0;
    default_sample_flags = set_sample_flags(0);
  } else {
    MOZ_ASSERT(0);
    return NS_ERROR_FAILURE;
  }

  size += sizeof(track_ID) +
          sizeof(default_sample_description_index) +
          sizeof(default_sample_duration) +
          sizeof(default_sample_size) +
          sizeof(default_sample_flags);

  *aBoxSize = size;

  return NS_OK;
}
Example #4
uint32_t
TrackRunBox::fillSampleTable()
{
  uint32_t table_size = 0;
  nsresult rv;
  nsTArray<RefPtr<EncodedFrame>> frames;
  FragmentBuffer* frag = mControl->GetFragment(mTrackType);

  rv = frag->GetFirstFragment(frames);
  if (NS_FAILED(rv)) {
    return 0;
  }
  uint32_t len = frames.Length();
  sample_info_table = MakeUnique<tbl[]>(len);
  // Create the sample table according to ISO/IEC 14496-12 8.8.8.2.
  for (uint32_t i = 0; i < len; i++) {
    // Sample size.
    sample_info_table[i].sample_size = 0;
    if (flags.to_ulong() & flags_sample_size_present) {
      sample_info_table[i].sample_size = frames.ElementAt(i)->GetFrameData().Length();
      mAllSampleSize += sample_info_table[i].sample_size;
      table_size += sizeof(uint32_t);
    }

    // Sample flags.
    sample_info_table[i].sample_flags = 0;
    if (flags.to_ulong() & flags_sample_flags_present) {
      sample_info_table[i].sample_flags =
        set_sample_flags(
          (frames.ElementAt(i)->GetFrameType() == EncodedFrame::AVC_I_FRAME));
      table_size += sizeof(uint32_t);
    }

    // Sample duration.
    sample_info_table[i].sample_duration = 0;
    if (flags.to_ulong() & flags_sample_duration_present) {
      // Calculate each frame's duration as "current frame timestamp - last
      // frame timestamp".
      uint64_t frame_time = 0;
      if (i == 0) {
        frame_time = frames.ElementAt(i)->GetTimeStamp() -
                     frag->GetLastFragmentLastFrameTime();
      } else {
        frame_time = frames.ElementAt(i)->GetTimeStamp() -
                     frames.ElementAt(i - 1)->GetTimeStamp();
        // Keep the last frame time of the current fragment; it is used to
        // calculate the first frame duration of the next fragment.
        if ((len - 1) == i) {
          frag->SetLastFragmentLastFrameTime(frames.ElementAt(i)->GetTimeStamp());
        }
      }

      // A TrackRunBox should contain exactly one track type, either audio or video.
      MOZ_ASSERT((mTrackType & Video_Track) ^ (mTrackType & Audio_Track));
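      // frame_time is in microseconds (hence the USECS_PER_S divisor); scale
      // it to the track's timescale: video clock rate or audio sample rate.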
      sample_info_table[i].sample_duration = (mTrackType & Video_Track ?
        frame_time * mVideoMeta->GetVideoClockRate() / USECS_PER_S :
        frame_time * mAudioMeta->GetAudioSampleRate() / USECS_PER_S);

      table_size += sizeof(uint32_t);
    }

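    // Sample composition time offset; no offset is used, so keep it zero.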
    sample_info_table[i].sample_composition_time_offset = 0;
  }
  return table_size;
}