Code Example #1
nsresult
VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData)
{
  vpx_codec_iter_t iter = nullptr;
  EncodedFrame::FrameType frameType = EncodedFrame::VP8_P_FRAME;
  nsTArray<uint8_t> frameData;
  nsresult rv;
  const vpx_codec_cx_pkt_t *pkt = nullptr;
  while ((pkt = vpx_codec_get_cx_data(mVPXContext, &iter)) != nullptr) {
    switch (pkt->kind) {
      case VPX_CODEC_CX_FRAME_PKT: {
        // Copy the encoded data from libvpx to frameData
        frameData.AppendElements((uint8_t*)pkt->data.frame.buf,
                                 pkt->data.frame.sz);
        break;
      }
      default: {
        break;
      }
    }
    // End of frame
    if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
      if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
        frameType = EncodedFrame::VP8_I_FRAME;
      }
      break;
    }
  }

  // pkt may be null here if the stream ended on a fragment, so guard the
  // dereference below.
  if (pkt && !frameData.IsEmpty() &&
      (pkt->data.frame.pts == mEncodedTimestamp)) {
    // Copy the encoded data to aData. Hold the new frame in a RefPtr so it
    // is released if SwapInFrameData fails below.
    RefPtr<EncodedFrame> videoData = new EncodedFrame();
    videoData->SetFrameType(frameType);
    // Convert the timestamp and duration to Usecs.
    CheckedInt64 timestamp = FramesToUsecs(mEncodedTimestamp, mTrackRate);
    if (timestamp.isValid()) {
      videoData->SetTimeStamp((uint64_t)timestamp.value());
    }
    CheckedInt64 duration = FramesToUsecs(pkt->data.frame.duration, mTrackRate);
    if (duration.isValid()) {
      videoData->SetDuration((uint64_t)duration.value());
    }
    rv = videoData->SwapInFrameData(frameData);
    NS_ENSURE_SUCCESS(rv, rv);
    VP8LOG("GetEncodedPartitions TimeStamp %lld Duration %lld\n",
           videoData->GetTimeStamp(), videoData->GetDuration());
    VP8LOG("frameType %d\n", videoData->GetFrameType());
    aData.AppendEncodedFrame(videoData);
  }

  return NS_OK;
}
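
The loop above drains libvpx's packet iterator after an encode call. As a point of reference, here is a minimal sketch of the encode-then-drain cycle that produces those packets; this is generic libvpx usage, not Gecko code, and EncodeAndDrain is an illustrative name:

static nsresult
EncodeAndDrain(vpx_codec_ctx_t* aCtx, const vpx_image_t* aImg,
               vpx_codec_pts_t aPts, unsigned long aDuration)
{
  // Each vpx_codec_encode() call may yield zero or more packets, so the
  // iterator must be drained after every call.
  if (vpx_codec_encode(aCtx, aImg, aPts, aDuration, 0, VPX_DL_REALTIME)) {
    return NS_ERROR_FAILURE;
  }
  vpx_codec_iter_t iter = nullptr;
  const vpx_codec_cx_pkt_t* pkt = nullptr;
  while ((pkt = vpx_codec_get_cx_data(aCtx, &iter)) != nullptr) {
    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
      // pkt->data.frame.buf and pkt->data.frame.sz hold the compressed
      // bytes; VPX_FRAME_IS_KEY in pkt->data.frame.flags marks keyframes.
    }
  }
  return NS_OK;
}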
Code Example #2
File: TestWebMWriter.cpp Project: 70599/Waterfox
 // When we append an I-frame to the WebM muxer, the muxer treats the
 // preceding data as "a cluster".
 // These test cases call this function repeatedly so that the previous
 // cluster is closed and its data can be retrieved via |GetContainerData|.
 void AppendDummyFrame(EncodedFrame::FrameType aFrameType,
                       uint64_t aDuration) {
   EncodedFrameContainer encodedVideoData;
   nsTArray<uint8_t> frameData;
   RefPtr<EncodedFrame> videoData = new EncodedFrame();
   // Create dummy frame data.
   frameData.SetLength(FIXED_FRAMESIZE);
   videoData->SetFrameType(aFrameType);
   videoData->SetTimeStamp(mTimestamp);
   videoData->SetDuration(aDuration);
   videoData->SwapInFrameData(frameData);
   encodedVideoData.AppendEncodedFrame(videoData);
   WriteEncodedTrack(encodedVideoData, 0);
   mTimestamp += aDuration;
 }
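
A hypothetical usage sketch of the helper above (the writer object and FIXED_DURATION constant are assumptions for illustration): because the muxer closes the preceding cluster whenever an I-frame arrives, appending a trailing I-frame makes the earlier frames retrievable via |GetContainerData|:

 writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
 writer.AppendDummyFrame(EncodedFrame::VP8_P_FRAME, FIXED_DURATION);
 writer.AppendDummyFrame(EncodedFrame::VP8_P_FRAME, FIXED_DURATION);
 // This I-frame closes the cluster formed by the three frames above.
 writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);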
Code Example #3
void
VorbisTrackEncoder::GetEncodedFrames(EncodedFrameContainer& aData)
{
  // vorbis does some data preanalysis, then divvies up blocks for
  // more involved (potentially parallel) processing. Get a single
  // block for encoding now.
  while (vorbis_analysis_blockout(&mVorbisDsp, &mVorbisBlock) == 1) {
    ogg_packet oggPacket;
    if (vorbis_analysis(&mVorbisBlock, &oggPacket) == 0) {
      VORBISLOG("vorbis_analysis_blockout block size %d", oggPacket.bytes);
      EncodedFrame* audiodata = new EncodedFrame();
      audiodata->SetFrameType(EncodedFrame::AUDIO_FRAME);
      nsTArray<uint8_t> frameData;
      frameData.AppendElements(oggPacket.packet, oggPacket.bytes);
      audiodata->SetFrameData(&frameData);
      aData.AppendEncodedFrame(audiodata);
    }
  }
}
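
For context, here is a minimal sketch of the submission side that makes vorbis_analysis_blockout() return blocks. This is standard libvorbis usage rather than code from this encoder, and SubmitPcm is an illustrative name:

static void
SubmitPcm(vorbis_dsp_state* aDsp, const float* aInterleaved,
          int aFrames, int aChannels)
{
  // Request an analysis buffer, deinterleave the input into it, then tell
  // libvorbis how many frames were written.
  float** buffer = vorbis_analysis_buffer(aDsp, aFrames);
  for (int ch = 0; ch < aChannels; ++ch) {
    for (int i = 0; i < aFrames; ++i) {
      buffer[ch][i] = aInterleaved[i * aChannels + ch];
    }
  }
  vorbis_analysis_wrote(aDsp, aFrames); // Passing 0 signals end of stream.
}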
Code Example #4
File: OmxTrackEncoder.cpp Project: L2-D2/gecko-dev
nsresult
OmxAudioTrackEncoder::AppendEncodedFrames(EncodedFrameContainer& aContainer)
{
  nsTArray<uint8_t> frameData;
  int outFlags = 0;
  int64_t outTimeUs = -1;

  nsresult rv = mEncoder->GetNextEncodedFrame(&frameData, &outTimeUs, &outFlags,
                                              3000); // wait up to 3ms
  NS_ENSURE_SUCCESS(rv, rv);

  if (!frameData.IsEmpty()) {
    bool isCSD = false;
    if (outFlags & OMXCodecWrapper::BUFFER_CODEC_CONFIG) { // codec specific data
      isCSD = true;
    } else if (outFlags & OMXCodecWrapper::BUFFER_EOS) { // last frame
      mEncodingComplete = true;
    }

    nsRefPtr<EncodedFrame> audiodata = new EncodedFrame();
    if (mEncoder->GetCodecType() == OMXCodecWrapper::AAC_ENC) {
      audiodata->SetFrameType(isCSD ?
        EncodedFrame::AAC_CSD : EncodedFrame::AAC_AUDIO_FRAME);
    } else if (mEncoder->GetCodecType() == OMXCodecWrapper::AMR_NB_ENC){
      audiodata->SetFrameType(isCSD ?
        EncodedFrame::AMR_AUDIO_CSD : EncodedFrame::AMR_AUDIO_FRAME);
    } else {
      MOZ_ASSERT(false, "audio codec not supported");
    }
    audiodata->SetTimeStamp(outTimeUs);
    audiodata->SwapInFrameData(frameData);
    aContainer.AppendEncodedFrame(audiodata);
  }

  return NS_OK;
}
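
A hedged sketch of a possible caller of this method (DrainAudioEncoder is an illustrative name, and the IsEncodingComplete() accessor and public visibility are assumptions): keep pumping until the encoder reports EOS:

static nsresult
DrainAudioEncoder(OmxAudioTrackEncoder* aEncoder,
                  EncodedFrameContainer& aContainer)
{
  nsresult rv = NS_OK;
  // Each call waits briefly for the next encoded buffer and appends it.
  while (NS_SUCCEEDED(rv) && !aEncoder->IsEncodingComplete()) {
    rv = aEncoder->AppendEncodedFrames(aContainer);
  }
  return rv;
}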
Code Example #5
File: OmxTrackEncoder.cpp Project: L2-D2/gecko-dev
nsresult
OmxVideoTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  VideoSegment segment;
  {
    // Move all the samples from mRawSegment to segment. We only hold the
    // monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait until the encoder is initialized and there is raw data to encode
    // (or the stream has ended), unless we are canceled.
    while (!mCanceled && (!mInitialized ||
          (mRawSegment.GetDuration() == 0 && !mEndOfStream))) {
      mReentrantMonitor.Wait();
    }

    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    segment.AppendFrom(&mRawSegment);
  }

  nsresult rv;
  // Start queuing raw frames to the input buffers of OMXCodecWrapper.
  VideoSegment::ChunkIterator iter(segment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;

    // Send only the unique video frames to OMXCodecWrapper.
    if (mLastFrame != chunk.mFrame) {
      uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
      layers::Image* img = (chunk.IsNull() || chunk.mFrame.GetForceBlack()) ?
                           nullptr : chunk.mFrame.GetImage();
      rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs);
      NS_ENSURE_SUCCESS(rv, rv);
    }

    mLastFrame.TakeFrom(&chunk.mFrame);
    mTotalFrameDuration += chunk.GetDuration();

    iter.Next();
  }

  // Send the EOS signal to OMXCodecWrapper.
  if (mEndOfStream && iter.IsEnded() && !mEosSetInEncoder) {
    uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
    layers::Image* img = (!mLastFrame.GetImage() || mLastFrame.GetForceBlack())
                         ? nullptr : mLastFrame.GetImage();
    rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs,
                          OMXCodecWrapper::BUFFER_EOS);
    NS_ENSURE_SUCCESS(rv, rv);

    // Keep sending EOS signal until OMXVideoEncoder gets it.
    mEosSetInEncoder = true;
  }

  // Dequeue an encoded frame from the output buffers of OMXCodecWrapper.
  nsTArray<uint8_t> buffer;
  int outFlags = 0;
  int64_t outTimeStampUs = 0;
  rv = mEncoder->GetNextEncodedFrame(&buffer, &outTimeStampUs, &outFlags,
                                     GET_ENCODED_VIDEO_FRAME_TIMEOUT);
  NS_ENSURE_SUCCESS(rv, rv);
  if (!buffer.IsEmpty()) {
    nsRefPtr<EncodedFrame> videoData = new EncodedFrame();
    if (outFlags & OMXCodecWrapper::BUFFER_CODEC_CONFIG) {
      videoData->SetFrameType(EncodedFrame::AVC_CSD);
    } else {
      videoData->SetFrameType((outFlags & OMXCodecWrapper::BUFFER_SYNC_FRAME) ?
                              EncodedFrame::AVC_I_FRAME : EncodedFrame::AVC_P_FRAME);
    }
    videoData->SwapInFrameData(buffer);
    videoData->SetTimeStamp(outTimeStampUs);
    aData.AppendEncodedFrame(videoData);
  }

  if (outFlags & OMXCodecWrapper::BUFFER_EOS) {
    mEncodingComplete = true;
    OMX_LOG("Done encoding video.");
  }

  return NS_OK;
}
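
A small illustration of the duration conversion used twice above, assuming USECS_PER_S is 1,000,000: frame durations are accumulated in track-rate ticks and scaled to microseconds.

static uint64_t
TicksToUsecs(uint64_t aTicks, uint32_t aTrackRate)
{
  // E.g. 45000 ticks at a track rate of 90000 -> 500000 us (half a second).
  return aTicks * USECS_PER_S / aTrackRate;
}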
Code Example #6
nsresult
OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  PROFILER_LABEL("OpusTrackEncoder", "GetEncodedTrack",
    js::ProfileEntry::Category::OTHER);
  {
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait until initialized or cancelled.
    while (!mCanceled && !mInitialized) {
      mReentrantMonitor.Wait();
    }
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
  }

  // The calculation below depends on mInitialized being true.
  MOZ_ASSERT(mInitialized);

  // Resampled frames left over from the previous call that did not fit into
  // an Opus packet.
  const int framesLeft = mResampledLeftover.Length() / mChannels;
  // When framesLeft is 0, (GetPacketDuration() - framesLeft) is a multiple
  // of kOpusSamplingRate, so there is no precision loss in the integer
  // division when computing framesToFetch. If framesLeft > 0, we need to add
  // 1 to framesToFetch to ensure there will be at least n frames after
  // re-sampling.
  const int frameRoundUp = framesLeft ? 1 : 0;

  MOZ_ASSERT(GetPacketDuration() >= framesLeft);
  // Try to fetch m frames such that there will be n frames
  // where (n + framesLeft) >= GetPacketDuration() after re-sampling.
  const int framesToFetch = !mResampler ? GetPacketDuration()
    : (GetPacketDuration() - framesLeft) * mSamplingRate / kOpusSamplingRate
      + frameRoundUp;
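  // Worked example with illustrative numbers (not from the source): with
  // mSamplingRate = 44100, GetPacketDuration() = 960 and framesLeft = 100,
  //   framesToFetch = (960 - 100) * 44100 / 48000 + 1 = 790 + 1 = 791;
  // resampling 791 frames up to 48 kHz yields at least 860 frames, and
  // 860 + 100 >= 960, enough for one full Opus packet.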
  {
    // Move all the samples from mRawSegment to mSourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait until enough raw data, end of stream or cancelled.
    while (!mCanceled && mRawSegment.GetDuration() +
        mSourceSegment.GetDuration() < framesToFetch &&
        !mEndOfStream) {
      mReentrantMonitor.Wait();
    }

    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    mSourceSegment.AppendFrom(&mRawSegment);

    // Pad |mLookahead| samples at the end of the source stream to prevent
    // loss of original data; the PCM duration will be calculated at the
    // 48 kHz rate later.
    if (mEndOfStream && !mEosSetInEncoder) {
      mEosSetInEncoder = true;
      mSourceSegment.AppendNullData(mLookahead);
    }
  }

  // Start encoding data.
  nsAutoTArray<AudioDataValue, 9600> pcm;
  pcm.SetLength(GetPacketDuration() * mChannels);
  AudioSegment::ChunkIterator iter(mSourceSegment);
  int frameCopied = 0;

  while (!iter.IsEnded() && frameCopied < framesToFetch) {
    AudioChunk chunk = *iter;

    // Chunk to the required frame size.
    int frameToCopy = chunk.GetDuration();
    if (frameCopied + frameToCopy > framesToFetch) {
      frameToCopy = framesToFetch - frameCopied;
    }

    if (!chunk.IsNull()) {
      // Append the interleaved data to the end of pcm buffer.
      AudioTrackEncoder::InterleaveTrackData(chunk, frameToCopy, mChannels,
        pcm.Elements() + frameCopied * mChannels);
    } else {
      memset(pcm.Elements() + frameCopied * mChannels, 0,
             frameToCopy * mChannels * sizeof(AudioDataValue));
    }

    frameCopied += frameToCopy;
    iter.Next();
  }

  RefPtr<EncodedFrame> audiodata = new EncodedFrame();
  audiodata->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME);
  int framesInPCM = frameCopied;
  if (mResampler) {
    nsAutoTArray<AudioDataValue, 9600> resamplingDest;
    // We want to consume all the input data, so we slightly oversize the
    // resampled data buffer so we can fit the output data in. We cannot really
    // predict the output frame count at each call.
    uint32_t outframes = frameCopied * kOpusSamplingRate / mSamplingRate + 1;
    uint32_t inframes = frameCopied;

    resamplingDest.SetLength(outframes * mChannels);

#ifdef MOZ_SAMPLE_TYPE_S16
    short* in = reinterpret_cast<short*>(pcm.Elements());
    short* out = reinterpret_cast<short*>(resamplingDest.Elements());
    speex_resampler_process_interleaved_int(mResampler, in, &inframes,
                                                        out, &outframes);
#else
    float* in = reinterpret_cast<float*>(pcm.Elements());
    float* out = reinterpret_cast<float*>(resamplingDest.Elements());
    speex_resampler_process_interleaved_float(mResampler, in, &inframes,
                                                          out, &outframes);
#endif

    MOZ_ASSERT(pcm.Length() >= mResampledLeftover.Length());
    PodCopy(pcm.Elements(), mResampledLeftover.Elements(),
        mResampledLeftover.Length());

    uint32_t outframesToCopy = std::min(outframes,
        static_cast<uint32_t>(GetPacketDuration() - framesLeft));

    MOZ_ASSERT(pcm.Length() - mResampledLeftover.Length() >=
        outframesToCopy * mChannels);
    PodCopy(pcm.Elements() + mResampledLeftover.Length(),
        resamplingDest.Elements(), outframesToCopy * mChannels);
    int frameLeftover = outframes - outframesToCopy;
    mResampledLeftover.SetLength(frameLeftover * mChannels);
    PodCopy(mResampledLeftover.Elements(),
        resamplingDest.Elements() + outframesToCopy * mChannels,
        mResampledLeftover.Length());
    // This is always at 48000Hz.
    framesInPCM = framesLeft + outframesToCopy;
    audiodata->SetDuration(framesInPCM);
  } else {
    // Ogg timestamping and pre-skip are always timed at 48000 Hz.
    audiodata->SetDuration(frameCopied * (kOpusSamplingRate / mSamplingRate));
  }

  // Remove the raw data that has been pulled into the pcm buffer.
  // frameCopied should be equal to (or smaller than, at end of stream)
  // GetPacketDuration().
  mSourceSegment.RemoveLeading(frameCopied);

  // We have reached the end of the input stream and all queued data has been
  // pulled for encoding.
  if (mSourceSegment.GetDuration() == 0 && mEndOfStream) {
    mEncodingComplete = true;
    LOG("[Opus] Done encoding.");
  }

  MOZ_ASSERT(mEndOfStream || framesInPCM == GetPacketDuration());

  // Append null data to the pcm buffer if the leftover data is not enough
  // for the Opus encoder.
  if (framesInPCM < GetPacketDuration() && mEndOfStream) {
    PodZero(pcm.Elements() + framesInPCM * mChannels,
        (GetPacketDuration() - framesInPCM) * mChannels);
  }
  nsTArray<uint8_t> frameData;
  // Encode the data with Opus Encoder.
  frameData.SetLength(MAX_DATA_BYTES);
  // A negative result is an Opus error code.
  int result = 0;
#ifdef MOZ_SAMPLE_TYPE_S16
  const opus_int16* pcmBuf = static_cast<opus_int16*>(pcm.Elements());
  result = opus_encode(mEncoder, pcmBuf, GetPacketDuration(),
                       frameData.Elements(), MAX_DATA_BYTES);
#else
  const float* pcmBuf = static_cast<float*>(pcm.Elements());
  result = opus_encode_float(mEncoder, pcmBuf, GetPacketDuration(),
                             frameData.Elements(), MAX_DATA_BYTES);
#endif
  frameData.SetLength(result >= 0 ? result : 0);

  if (result < 0) {
    LOG("[Opus] Fail to encode data! Result: %s.", opus_strerror(result));
  }
  if (mEncodingComplete) {
    if (mResampler) {
      speex_resampler_destroy(mResampler);
      mResampler = nullptr;
    }
    mResampledLeftover.SetLength(0);
  }

  audiodata->SwapInFrameData(frameData);
  aData.AppendEncodedFrame(audiodata);
  return result >= 0 ? NS_OK : NS_ERROR_FAILURE;
}
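
For completeness, a minimal sketch (an assumption, not shown in the source) of the encoder setup this method relies on. Opus encoders only run at their native rates, which is why everything above is timed at kOpusSamplingRate (48000 Hz):

static OpusEncoder*
CreateOpusEncoder(int aChannels)
{
  int error = OPUS_OK;
  OpusEncoder* encoder = opus_encoder_create(kOpusSamplingRate, aChannels,
                                             OPUS_APPLICATION_AUDIO, &error);
  return error == OPUS_OK ? encoder : nullptr;
}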